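"""EmailGenie: a Gradio app (Hugging Face Space) that generates cold outreach emails
using meta-llama/Llama-3.2-1B through the Hugging Face InferenceClient."""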
import os
import logging

import gradio as gr
from huggingface_hub import InferenceClient

# Configure logging so generation attempts and errors appear in the Space logs
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Model served through the Hugging Face Inference API
model_name = "meta-llama/Llama-3.2-1B"

# The access token must be provided as an environment variable; fail fast if it is missing
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
if not hf_token:
    logger.error("HUGGINGFACE_TOKEN environment variable is not set")
    raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")

client = InferenceClient(model=model_name, token=hf_token)


def generate_text(prompt):
    """Call the hosted model and return its completion, or an error message on failure."""
    try:
        logger.info(f"Attempting to generate text for prompt: {prompt[:50]}...")
        response = client.text_generation(
            prompt,
            max_new_tokens=500,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            do_sample=True,
        )
        logger.info(f"Generated text: {response[:100]}...")
        return response
    except Exception as e:
        logger.error(f"Error in generate_text: {type(e).__name__}: {str(e)}")
        return f"An error occurred: {type(e).__name__}: {str(e)}"


def generate_email(industry, recipient_role, company_details):
    """Build the cold-email prompt from the form inputs and return the generated email."""
    try:
        prompt = f"""Task: Generate a professional cold outreach email.
Context:
- Industry: {industry}
- Recipient Role: {recipient_role}
- Company Details: {company_details}
Instructions:
1. Create a catchy subject line related to the industry and recipient role.
2. Write a personalized greeting.
3. Introduce yourself and your company briefly.
4. Explain how your company can benefit the recipient, using specific details from the company information.
5. Suggest a meeting or call to discuss further.
6. Thank the recipient and provide your contact information.
7. Use a professional closing.
Now, write the email following these instructions. Be creative and specific, don't use placeholder text:
"""
        generated_text = generate_text(prompt)
        # Strip any echoed prompt text so only the email itself is returned
        email_content = generated_text.split("Now, write the email following these instructions.")[-1].strip()
        logger.info(f"Generated email for {industry}, {recipient_role}")
        return email_content
    except Exception as e:
        logger.error(f"Error in generate_email: {type(e).__name__}: {str(e)}")
        return "I apologize, but an unexpected error occurred. Please try again later or contact support."


def test_model_connection():
    """Run a short test prompt to verify that the Inference API is reachable."""
    try:
        test_prompt = "Write a short paragraph about the importance of AI in modern business:"
        response = generate_text(test_prompt)
        logger.info(f"Test model connection successful. Response: {response}")
        return "Model connection test successful. Response: " + response
    except Exception as e:
        logger.error(f"Test model connection failed: {type(e).__name__}: {str(e)}")
        return f"Model connection test failed: {type(e).__name__}: {str(e)}"


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# EmailGenie: AI-Powered Cold Email Generator")

    with gr.Tab("Generate Email"):
        industry = gr.Textbox(label="Industry")
        recipient_role = gr.Textbox(label="Recipient Role")
        company_details = gr.Textbox(label="Company/Personal Details", lines=5)
        generate_button = gr.Button("Generate Email")
        output = gr.Textbox(label="Generated Email", lines=10)
        generate_button.click(generate_email, inputs=[industry, recipient_role, company_details], outputs=output)

    with gr.Tab("Test Connection"):
        test_button = gr.Button("Test Model Connection")
        test_output = gr.Textbox(label="Connection Test Result", lines=5)
        test_button.click(test_model_connection, inputs=None, outputs=test_output)

demo.launch()