# Spaces: Running on Zero
# File size: 1,076 Bytes
#
# Simple example.
#
import spaces
from diffusers import DiffusionPipeline
import os
import torch
from transformers import pipeline
import gradio as gr
# Hugging Face token for gated models; read from the Space's secrets.
# May be None, which is fine for fully public models.
token = os.getenv("HUGGINGFACE_API_TOKEN")

# Model to serve. Alternatives tried previously (kept for reference):
#   meta-llama/Meta-Llama-3-8B-Instruct
#   instructlab/granite-7b-lab
#   ibm/granite-7b-base
model = "ibm-granite/granite-3b-code-instruct"
print(f'Loading model {model}')

# Build a text-generation pipeline. bfloat16 + device_map="auto" lets
# accelerate place the weights on the available device automatically,
# so no explicit pipe.to('cuda') is needed.
pipe = pipeline("text-generation", model, torch_dtype=torch.bfloat16, device_map="auto", token=token)
@spaces.GPU
def generate(prompt):
    """Generate a completion for *prompt* using the module-level pipeline.

    Args:
        prompt: The input text to complete.

    Returns:
        The generated text string. NOTE(review): for a text-generation
        pipeline this typically includes the prompt as a prefix — confirm
        if prompt-stripping is desired.
    """
    response = pipe(prompt, max_new_tokens=512)
    # Plain string: the original used an f-string with no placeholders.
    print('Response received!')
    return response[0]['generated_text']
# Multi-line prompt box, pre-filled with an example coding request.
input_textbox = gr.Textbox(
    lines=3,
    label="Prompt",
    info="Ask me something.",
    value="# Write a python function to read a csv file using pandas and print rows 20 through 25.",
)
# Wire the prompt box to the generator and serve the app.
demo = gr.Interface(
    fn=generate,
    inputs=input_textbox,
    outputs=gr.Text(),
    title=model,
)
demo.launch()