import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import gradio as gr

# Load the model and tokenizer
model_name = "deepseek-ai/deepseek-llm-7b-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
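# Note: device_map="auto" requires the `accelerate` package to be installed;
# loading the 7B model in bfloat16 needs roughly 14 GB of GPU memory.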
model.generation_config = GenerationConfig.from_pretrained(model_name)
model.generation_config.pad_token_id = model.generation_config.eos_token_id
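
# Optional (not part of the original script): the generation config could be
# tuned here before serving, e.g. sampling instead of greedy decoding.
# The values below are illustrative assumptions, not tuned recommendations.
# model.generation_config.do_sample = True
# model.generation_config.temperature = 0.7
# model.generation_config.top_p = 0.9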

def generate_response(prompt):
    # Tokenize the input prompt
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    
    # Generate the response
    outputs = model.generate(**inputs, max_new_tokens=100)
    
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    
    return response

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_response,  # Function to call
    inputs="text",         # Input type
    outputs="text",        # Output type
    title="DeepSeek 7B Chat",  # Title of the app
    description="A simple chat interface for the DeepSeek 7B model."
)

# Launch the app
iface.launch()
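
# Sketch of an alternative launch (assumption, not in the original script):
# to get a shareable public URL, e.g. when running in a notebook, Gradio
# supports: iface.launch(share=True)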