Spaces:
Running
on
Zero
Running
on
Zero
File size: 831 Bytes
e9a1511 7529aa7 eaa6aa4 6d028a6 cc5d713 e8b4f94 cc5d713 6d028a6 867a375 ec23033 867a375 448f155 cc5d713 00a0539 7529aa7 eaa6aa4 9030a60 aa41904 866f150 45066df eaa6aa4 448f155 cc5d713 e9a1511 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
#
# Simple example.
#
import spaces
from diffusers import DiffusionPipeline
import os
import torch
from transformers import pipeline
import gradio as gr
# Hugging Face access token (needed for gated models such as Llama 3),
# read from the Space's secrets; None when unset.
token = os.getenv("HUGGINGFACE_API_TOKEN")
# SECURITY: never print the token itself — it is a secret. Only log presence.
print(f'HUGGINGFACE_API_TOKEN set: {token is not None}')

# Gated alternative, kept for reference:
# model = "meta-llama/Meta-Llama-3-8B-Instruct"
model = "ibm-granite/granite-3b-code-instruct"
print(f'Loading model {model}')
# device_map="auto" places the model on GPU when one is available (ZeroGPU
# attaches it inside @spaces.GPU calls), so no explicit .to('cuda') is needed.
pipe = pipeline("text-generation", model, torch_dtype=torch.bfloat16, device_map="auto", token=token)
@spaces.GPU
def generate(prompt):
    """Generate a text completion for *prompt* with the global pipeline.

    Decorated with @spaces.GPU so ZeroGPU allocates a GPU for the duration
    of the call.

    Args:
        prompt: Input text passed to the text-generation pipeline.

    Returns:
        The 'generated_text' string of the first returned sequence
        (includes the prompt, per transformers pipeline defaults).
    """
    response = pipe(prompt, max_new_tokens=512)
    # Plain string — the original used an f-string with no placeholders.
    print('Response received!')
    # pipeline(...) returns a list of dicts: [{'generated_text': ...}, ...]
    return response[0]['generated_text']
# Minimal UI: a single text box (prefilled with a sample question) wired to
# generate(), and a text box for the model's answer. launch() starts the
# Gradio server for the Space.
demo = gr.Interface(
    fn=generate,
    inputs=gr.Text("When is the next solar eclipse?"),
    outputs=gr.Text(),
)
demo.launch()
|