import streamlit as st
import os
import requests
# Set page config
st.set_page_config(
page_title="T5-small LoRA Summarization",
page_icon="๐",
layout="wide"
)
# Custom CSS
st.markdown("""
""", unsafe_allow_html=True)
# Header
st.markdown("""
๐ T5-small LoRA Summarization Model
Fine-tuned for efficient text summarization using LoRA adapters
""", unsafe_allow_html=True)
def try_direct_loading():
"""Try to load the model directly"""
try:
from transformers import pipeline
summarizer = pipeline(
"summarization",
model="manesh1/t5-small-lora-summarization"
)
return summarizer, "direct"
except Exception as e:
return None, f"Direct loading failed: {str(e)}"
def try_local_loading():
"""Try to load from local files"""
try:
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
# Load from current directory
tokenizer = AutoTokenizer.from_pretrained(".")
model = AutoModelForSeq2SeqLM.from_pretrained(".")
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)
return summarizer, "local"
except Exception as e:
return None, f"Local loading failed: {str(e)}"
def main():
# Model Information
col1, col2 = st.columns([2, 1])
with col1:
st.header("๐ Model Overview")
st.markdown("""
This model is a **T5-small** architecture fine-tuned with **LoRA (Low-Rank Adaptation)**
specifically for text summarization tasks. The model maintains the efficiency of T5-small
while being optimized for summarization through parameter-efficient fine-tuning.
""")
st.markdown("""
๐ Model Files Status
- โ
adapter_config.json
- โ
adapter_model.safetensors
- โ
tokenizer files
- โ
configuration files
All model files are properly deployed!
""", unsafe_allow_html=True)
with col2:
st.header("๐ง Quick Use")
st.code("""
from transformers import pipeline
summarizer = pipeline(
"summarization",
model="manesh1/t5-small-lora-summarization"
)
""", language="python")
# TRY INFERENCE SECTION
st.markdown("""
๐งช Try the Model Here!
Enter text below to test summarization
""", unsafe_allow_html=True)
# Text input for summarization
input_text = st.text_area(
"Enter text to summarize:",
height=200,
placeholder="Paste your text here to see the model in action...",
key="input_text"
)
col1, col2 = st.columns(2)
with col1:
max_length = st.slider("Maximum summary length", 50, 300, 150)
with col2:
min_length = st.slider("Minimum summary length", 10, 100, 30)
if st.button("๐ Generate Summary", type="primary", use_container_width=True):
if not input_text.strip():
st.warning("Please enter some text to summarize.")
else:
with st.spinner("Attempting to load model and generate summary..."):
# Try different loading methods
summarizer, method = try_direct_loading()
if summarizer is None:
# Try local loading
summarizer, method = try_local_loading()
if summarizer:
st.success(f"โ
Model loaded successfully via {method} method!")
try:
# Generate summary
result = summarizer(
input_text,
max_length=max_length,
min_length=min_length,
do_sample=False
)
summary = result[0]['summary_text']
st.subheader("๐ Generated Summary")
st.success(summary)
# Statistics
col1, col2, col3 = st.columns(3)
with col1:
st.metric("Input Words", len(input_text.split()))
with col2:
st.metric("Summary Words", len(summary.split()))
with col3:
reduction = ((len(input_text.split()) - len(summary.split())) / len(input_text.split())) * 100
st.metric("Reduction", f"{reduction:.1f}%")
except Exception as e:
st.error(f"Error during summarization: {str(e)}")
else:
st.warning("""
**Could not load the model in this environment.**
This is common on Hugging Face Spaces due to PyTorch limitations.
However, your model works perfectly in other environments!
**Try these alternatives:**
- Use the code examples in your local Python environment
- Create a new Space with Gradio interface
- Use the Hugging Face Inference API
""")
# Show what would happen in a working environment
st.info("**In a working environment, your input would be summarized like this:**")
words = input_text.split()
if len(words) > 30:
demo_summary = " ".join(words[:20]) + "... [summary continues]"
st.write(demo_summary)
# Usage Examples
st.header("๐ก Usage Examples")
tab1, tab2, tab3 = st.tabs(["Basic Usage", "Advanced", "API"])
with tab1:
st.markdown("""
```python
from transformers import pipeline
# Load model
summarizer = pipeline(
"summarization",
model="manesh1/t5-small-lora-summarization"
)
# Summarize text
text = \"\"\"Artificial intelligence (AI) is intelligence demonstrated by machines,
as opposed to natural intelligence displayed by animals including humans.
Leading AI textbooks define the field as the study of intelligent agents...\"\"\"
summary = summarizer(
text,
max_length=150,
min_length=30,
do_sample=False
)[0]['summary_text']
print("Summary:", summary)
```
""")
with tab2:
st.markdown("""
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Load specific components
tokenizer = AutoTokenizer.from_pretrained(
"manesh1/t5-small-lora-summarization"
)
model = AutoModelForSeq2SeqLM.from_pretrained(
"manesh1/t5-small-lora-summarization"
)
# Custom inference
def summarize_text(text, max_length=150):
inputs = tokenizer("summarize: " + text,
return_tensors="pt",
max_length=512,
truncation=True)
outputs = model.generate(
**inputs,
max_length=max_length,
min_length=30,
num_beams=4,
early_stopping=True
)
summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
return summary
# Usage
text = "Your long text here..."
summary = summarize_text(text)
print(summary)
```
""")
with tab3:
st.markdown("""
```python
import requests
API_URL = "https://api-inference.huggingface.co/models/manesh1/t5-small-lora-summarization"
headers = {"Authorization": "Bearer YOUR_HF_TOKEN"}
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
output = query({
"inputs": "Your text here...",
"parameters": {
"max_length": 150,
"min_length": 30,
"do_sample": False
}
})
print(output[0]['summary_text'])
```
""")
# Final notes
st.header("๐ Notes")
st.info("""
**About this Space**: This interface provides multiple ways to use your model.
The direct inference might work depending on the environment's PyTorch availability.
Your model files are complete and ready for use in any compatible environment!
""")
st.success("**Your model is ready to use!** Share this link: https://huggingface.co/manesh1/t5-small-lora-summarization")
if __name__ == "__main__":
main()