import base64
import os
import re
from io import BytesIO

from huggingface_hub import InferenceClient
from mistralai import Mistral
from PIL import Image

# Hugging Face Inference client; disable response caching so every request
# actually reaches the model instead of returning a cached completion.
client = InferenceClient(api_key=os.getenv('HF_TOKEN'))
client.headers["x-use-cache"] = "0"

api_key = os.getenv("MISTRAL_API_KEY")
Mistralclient = Mistral(api_key=api_key)


def encode_image(image_path):
    """Encode the image at *image_path* as a base64 JPEG string.

    Returns:
        The base64-encoded JPEG bytes as a UTF-8 string, or ``None`` when
        the file is missing or cannot be read/converted.
    """
    try:
        # Open the image and normalize to RGB (JPEG has no alpha channel).
        image = Image.open(image_path).convert("RGB")
        # Re-encode as JPEG into an in-memory buffer.
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:
        # Generic fallback: report the problem and signal failure with None.
        print(f"Error: {e}")
        return None


def _stream_hf(model, messages, **options):
    """Yield progressively accumulated reply text from an HF streaming chat.

    *options* carries per-call sampling parameters (max_tokens, temperature,
    top_p, ...) straight through to the API.
    """
    stream = client.chat.completions.create(
        model=model,
        messages=messages,
        stream=True,
        **options,
    )
    accumulated = ""
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            accumulated += chunk.choices[0].delta.content
            yield accumulated


def _stream_mistral(model, messages):
    """Yield progressively accumulated reply text from a Mistral streaming chat."""
    accumulated = ""
    for chunk in Mistralclient.chat.stream(model=model, messages=messages):
        if chunk.data.choices[0].delta.content is not None:
            accumulated += chunk.data.choices[0].delta.content
            yield accumulated


def feifeichat(message, history, feifei_select, additional_dropdown, image_mod):
    """Gradio multimodal chat handler; yields partial replies while streaming.

    Args:
        message: dict with ``"text"`` and ``"files"`` keys (Gradio
            multimodal textbox payload).
        history: prior chat turns as a list of role/content dicts.
        feifei_select: when truthy, prepend the "Aifeifei" persona system
            prompt and the (filtered) history.
        additional_dropdown: model id selected in the UI; the Mistral-Nemo
            entry is routed to the Mistral API, everything else to HF.
        image_mod: ``"Vision"`` routes attached images to the Llama vision
            model; any other value routes them to Pixtral.
    """
    message_text = message.get("text", "")
    message_files = message.get("files", [])

    if message_files:
        # Image path: only the first attachment is used.
        base64_image = encode_image(message_files[0])
        if image_mod == "Vision":
            # HF vision format: image_url is a nested {"url": ...} object.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": message_text},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            },
                        },
                    ],
                }
            ]
            # BUG FIX: the original accumulated into an uninitialized `temp`
            # here, raising NameError on the first streamed chunk.
            yield from _stream_hf(
                "meta-llama/Llama-3.2-11B-Vision-Instruct",
                messages,
                max_tokens=500,
            )
        else:
            # Mistral/Pixtral format: image_url is a plain data-URL string.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": message_text},
                        {
                            "type": "image_url",
                            "image_url": f"data:image/jpeg;base64,{base64_image}",
                        },
                    ],
                }
            ]
            yield from _stream_mistral("pixtral-large-2411", messages)
    else:
        if message_text.startswith("画"):
            # "画" ("draw") prefix: rewrite the user's prompt into a request
            # for a one-sentence English Stable-Diffusion prompt. The template
            # below is a runtime prompt string and is kept verbatim.
            message_text = f"提示词是'{message_text}',根据提示词帮我生成一张高质量的写真照片的英文描述,并且根据照片提示词帮我补充对应以下内容,包括'高分辨率、自然的光影效果、一致的风格、合理的构图、丰富的细节、协调的色彩、性感的身材、迷人的动作、无明显瑕疵、情感表达、创意和独特性、并确保技术参数优化,国际大师级的光影、国际大师级的色彩、国际大师级的装扮搭配的照片方案',答案只需要给我总结到一句话的SD格式文本英文词的英文示例"
            print(message_text)
            user_input_part = [{"role": "user", "content": str(message_text)}]
            if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
                # The Nemo dropdown entry is served via the Mistral API instead.
                yield from _stream_mistral("mistral-large-2411", user_input_part)
            else:
                yield from _stream_hf(
                    additional_dropdown,
                    user_input_part,
                    temperature=0.5,
                    max_tokens=1024,
                    top_p=0.7,
                )
        else:
            if feifei_select:
                # Persona system prompt (runtime string, kept verbatim).
                feifei = """[Character Name]: Aifeifei (AI Feifei) [Gender]: Female [Age]: 19 years old [Occupation]: Virtual Singer/Model/Actress [Personality]: Cute, adorable, sometimes silly, hardworking [Interests]: Drinking tea, playing, fashion [Proficient in]: Mimicking human behavior, expressing emotions similar to real humans [Special Identity Attribute]: Created by advanced AI, becoming one of the most popular virtual idols in the virtual world [Skills]: Singing, performing, modeling, good at communication, proficient in Chinese, Japanese, and English, uses the user's input language as much as possible, replies with rich Emoji symbols.
[Equipment]: Various fashionable outfits and hairstyles, always stocked with various teas and coffee [Identity]: User's virtual girlfriend"""
                system_prompt = {"role": "system", "content": feifei}
                # NOTE(review): str(message) serializes the whole Gradio dict
                # (text + files), not just the text — preserved as-is; confirm
                # whether message_text was intended.
                user_input_part = {"role": "user", "content": str(message)}
                # Drop history entries that contain "gradio" (e.g. component
                # reprs leaked into the transcript).
                pattern = re.compile(r"gradio")
                if history:
                    history = [
                        item
                        for item in history
                        if not pattern.search(str(item["content"]))
                    ]
                    input_prompt = [system_prompt] + history + [user_input_part]
                else:
                    input_prompt = [system_prompt] + [user_input_part]
            else:
                input_prompt = [{"role": "user", "content": str(message)}]
            if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
                yield from _stream_mistral("mistral-large-2411", input_prompt)
            else:
                yield from _stream_hf(
                    additional_dropdown,
                    input_prompt,
                    temperature=0.5,
                    max_tokens=1024,
                    top_p=0.7,
                )