# Hugging Face Space app — "Aifeifei" chat backend (runs on Zero GPU hardware).
import base64
from io import BytesIO
import os
from mistralai import Mistral
import re
from PIL import Image
from huggingface_hub import InferenceClient
# Hugging Face Inference client; "x-use-cache: 0" disables HF response
# caching so each streamed completion is generated fresh.
client = InferenceClient(api_key=os.getenv('HF_TOKEN'))
client.headers["x-use-cache"] = "0"
# Mistral platform client (used for the pixtral / mistral-large models).
api_key = os.getenv("MISTRAL_API_KEY")
Mistralclient = Mistral(api_key=api_key)
def encode_image(image_path):
    """Load an image, scale it to 512px height (keeping aspect ratio),
    and return it as a base64-encoded JPEG string.

    Returns None (after printing an error) if the file is missing or
    anything else goes wrong — callers treat None as "no image".
    """
    try:
        img = Image.open(image_path).convert("RGB")
        target_height = 512
        scale = target_height / float(img.size[1])
        target_width = int(float(img.size[0]) * scale)
        img = img.resize((target_width, target_height), Image.LANCZOS)
        buffer = BytesIO()
        img.save(buffer, format="JPEG")
        return base64.b64encode(buffer.getvalue()).decode("utf-8")
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:
        print(f"Error: {e}")
        return None
def feifeiprompt(feifei_select=True, message_text="", history=""):
    """Build the chat-message list sent to the LLM.

    Args:
        feifei_select: when True, prepend the "Aifeifei" persona system
            prompt and include prior history, dropping any history entry
            whose content mentions "gradio" (Gradio UI artifacts).
        message_text: the user's current message (coerced to str).
        history: list of prior {"role", "content"} dicts; the default ""
            is kept for backward compatibility and means "no history".

    Returns:
        A list of {"role", "content"} dicts in API order.
    """
    if not feifei_select:
        # Raw pass-through: just the user's message, no persona.
        return [{"role": "user", "content": str(message_text)}]

    feifei = """[Character Name]: Aifeifei (AI Feifei) [Gender]: Female [Age]: 19 years old [Occupation]: Virtual Singer/Model/Actress [Personality]: Cute, adorable, sometimes silly, hardworking [Interests]: Drinking tea, playing, fashion [Proficient in]: Mimicking human behavior, expressing emotions similar to real humans [Special Identity Attribute]: Created by advanced AI, becoming one of the most popular virtual idols in the virtual world [Skills]: Singing, performing, modeling, good at communication, proficient in Chinese, Japanese, and English, uses the user's input language as much as possible, replies with rich Emoji symbols. [Equipment]: Various fashionable outfits and hairstyles, always stocked with various teas and coffee [Identity]: User's virtual girlfriend"""
    system_prompt = {"role": "system", "content": feifei}
    user_input_part = {"role": "user", "content": str(message_text)}

    if history:
        # Only compile the filter pattern when there is history to filter.
        pattern = re.compile(r"gradio")
        history = [item for item in history
                   if not pattern.search(str(item["content"]))]
        return [system_prompt] + history + [user_input_part]
    return [system_prompt, user_input_part]
def feifeiimgprompt(message_files, message_text, image_mod):
    """Stream a model reply about the first attached image.

    "Vision" mode uses the HF Inference client with Llama-3.2 Vision;
    any other mode uses Mistral's "pixtral-large-2411". Yields the
    accumulated reply text after every streamed chunk. Yields nothing
    if the image cannot be read/encoded.
    """
    first_file = message_files[0]
    base64_image = encode_image(first_file)
    if base64_image is None:
        return
    data_url = f"data:image/jpeg;base64,{base64_image}"

    if image_mod == "Vision":
        request_messages = [{
            "role": "user",
            "content": [
                {"type": "text", "text": message_text},
                {"type": "image_url", "image_url": {"url": data_url}},
            ],
        }]
        response = client.chat.completions.create(
            model="meta-llama/Llama-3.2-11B-Vision-Instruct",
            messages=request_messages,
            max_tokens=500,
            stream=True,
        )
        accumulated = ""
        for chunk in response:
            piece = chunk.choices[0].delta.content
            if piece is not None:
                accumulated += piece
                yield accumulated
    else:
        # NOTE: Mistral's API takes the data URL directly as a string,
        # not wrapped in a {"url": ...} dict like the HF client above.
        request_messages = [{
            "role": "user",
            "content": [
                {"type": "text", "text": message_text},
                {"type": "image_url", "image_url": data_url},
            ],
        }]
        accumulated = ""
        for chunk in Mistralclient.chat.stream(model="pixtral-large-2411",
                                               messages=request_messages):
            piece = chunk.data.choices[0].delta.content
            if piece is not None:
                accumulated += piece
                yield accumulated
def feifeichatmod(additional_dropdown, input_prompt):
    """Stream a text-only chat completion for the selected model.

    The "mistralai/Mistral-Nemo-Instruct-2411" dropdown choice is routed
    to Mistral's own API (as model "mistral-large-2411"); every other
    choice goes to the HF Inference client. Yields the accumulated reply
    after each streamed chunk.
    """
    if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
        reply = ""
        for chunk in Mistralclient.chat.stream(model="mistral-large-2411",
                                               messages=input_prompt):
            piece = chunk.data.choices[0].delta.content
            if piece is not None:
                reply += piece
                yield reply
    else:
        response = client.chat.completions.create(
            model=additional_dropdown,
            messages=input_prompt,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
            stream=True,
        )
        reply = ""
        for chunk in response:
            piece = chunk.choices[0].delta.content
            if piece is not None:
                reply += piece
                yield reply
def feifeichat(message, history, feifei_select, additional_dropdown, image_mod):
    """Top-level chat handler (Gradio-style): dispatch to the image or
    text pipeline and stream the reply.

    Args:
        message: dict with optional "text" and "files" keys.
        history: prior chat messages, forwarded to feifeiprompt().
        feifei_select: enable the Aifeifei persona system prompt.
        additional_dropdown: model id chosen in the UI.
        image_mod: "Vision" selects the HF vision model for images.

    Yields:
        The accumulated reply text after each streamed chunk.
    """
    message_text = message.get("text", "")
    message_files = message.get("files", [])

    if message_files:
        # An image was attached: answer about the image instead of chatting.
        yield from feifeiimgprompt(message_files, message_text, image_mod)
        return

    # Bug fix: the old code used str.replace, which stripped EVERY
    # occurrence of the trigger word from the message ("draw a drawbridge"
    # became " a bridge"). Strip only the leading trigger instead.
    for trigger in ("画", "draw"):
        if message_text.startswith(trigger):
            stripped = message_text[len(trigger):]
            message_text = f"提示词是'{stripped}',根据提示词帮我生成一张高质量照片的一句话英文回复"
            break

    yield from feifeichatmod(additional_dropdown,
                             feifeiprompt(feifei_select, message_text, history))