# BART Summarization — Streamlit demo app
import streamlit as st
from transformers import BartTokenizer, BartForConditionalGeneration
import torch
import os

# Path to the folder containing the saved model
model_path = './Bart'  # Update this path if your model is in a different folder


@st.cache_resource
def _load_model(path):
    """Load the BART tokenizer/model once per process (cached by Streamlit).

    Without caching, Streamlit re-executes this script on every widget
    interaction and the checkpoint would be re-read from disk each time.

    Returns:
        (tokenizer, model, device): the tokenizer, the model already moved
        to the chosen device and set to eval mode, and the device string.
    """
    tok = BartTokenizer.from_pretrained(path)
    mdl = BartForConditionalGeneration.from_pretrained(path)
    # Set the device (CPU or GPU) once at load time, not per click.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    mdl.to(device)
    mdl.eval()  # inference only — disables dropout etc.
    return tok, mdl, device


tokenizer, model, device = _load_model(model_path)

st.title("BART Summarization")
input_text = st.text_area("Enter text to summarize", "")

if st.button("Summarize"):
    if input_text.strip():
        # Truncate long inputs to the encoder's supported length.
        inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True)
        inputs = {key: value.to(device) for key, value in inputs.items()}
        # Generate summary; attention_mask is passed so padding (if any)
        # is ignored, and no_grad avoids building an autograd graph.
        with torch.no_grad():
            summary_ids = model.generate(
                inputs['input_ids'],
                attention_mask=inputs.get('attention_mask'),
                max_length=150,
                min_length=40,
                length_penalty=2.0,
                num_beams=4,
                early_stopping=True,
            )
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        st.write("Summary:", summary)
    else:
        st.write("Please enter text to summarize.")