import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

def chatbot(input_text, chat_history):
    # gr.ChatInterface passes the displayed history as a list of
    # [user, bot] text pairs (the default tuple format), so rebuild the
    # token-level context from it on every turn.
    history_ids = [
        tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
        for pair in chat_history
        for message in pair
    ]
    # Encode the new user input, add the eos_token, and return a PyTorch tensor
    new_user_input_ids = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")
    # Append the new user input tokens to the reconstructed chat history
    bot_input_ids = torch.cat(history_ids + [new_user_input_ids], dim=-1)
    # Generate a response while limiting the total context to 1000 tokens
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens (everything after the prompt)
    output = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
    # gr.ChatInterface tracks the displayed history itself, so the handler
    # returns only the bot's reply rather than a (reply, history) tuple.
    return output
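
# Optional local smoke test (a sketch, not part of the Space itself):
# gr.ChatInterface calls the handler as fn(message, history), so the two
# calls below mimic that protocol with the default tuple-style history.
# Uncomment to try the model before launching the UI.
# first_reply = chatbot("Hello", [])
# second_reply = chatbot("How are you?", [["Hello", first_reply]])
# print(first_reply, second_reply, sep="\n")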

iface = gr.ChatInterface(
    fn=chatbot,
    title="DialoGPT Chatbot (Small)",
    description="Simple chat application using the microsoft/DialoGPT-small model. Try it out!",
    examples=["Hello", "How are you?", "Tell me a joke"],
)

iface.launch()