import base64
import os
import re
from io import BytesIO

from huggingface_hub import InferenceClient
from mistralai import Mistral
from PIL import Image

# Hugging Face Inference client, used for the hosted vision and chat models.
client = InferenceClient(api_key=os.getenv("HF_TOKEN"))
client.headers["x-use-cache"] = "0"  # disable response caching so every call generates fresh output
api_key = os.getenv("MISTRAL_API_KEY")
# Mistral client, used for the Pixtral vision model and Mistral Large text model.
Mistralclient = Mistral(api_key=api_key)


def encode_image(image_path):
    """Encode the image at ``image_path`` to a base64 JPEG string."""
    try:
        # Open the image file and normalize it to RGB.
        image = Image.open(image_path).convert("RGB")

        # Serialize the image into an in-memory JPEG byte stream.
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

        return img_str
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # catch-all so a bad upload never crashes the chat loop
        print(f"Error: {e}")
        return None
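
# A small usage note (the file name here is hypothetical): on success
# encode_image returns a base64 string ready to embed in a data URL; on
# failure it returns None, which callers should check first.
#
#     b64 = encode_image("portrait.jpg")
#     if b64 is not None:
#         data_url = f"data:image/jpeg;base64,{b64}"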


def feifeichat(message, history, feifei_select, additional_dropdown, image_mod):
    """Stream a chat reply, routing to a vision, image-prompt, or plain text model."""
    message_text = message.get("text", "")
    message_files = message.get("files", [])

    if message_files:
        # An image is attached: send it to a vision model along with the text.
        message_file = message_files[0]
        base64_image = encode_image(message_file)
        if base64_image is None:
            yield "Sorry, the attached image could not be read."
            return
        if image_mod == "Vision":
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": message_text},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            },
                        },
                    ],
                }
            ]

            stream = client.chat.completions.create(
                model="meta-llama/Llama-3.2-11B-Vision-Instruct",
                messages=messages,
                max_tokens=500,
                stream=True,
            )

            temp = ""  # accumulated reply; each yield emits the full text so far
            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    temp += chunk.choices[0].delta.content
                    yield temp
        else:
            # Otherwise use Mistral's Pixtral vision model.
            model = "pixtral-large-2411"
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": message_text},
                        {
                            "type": "image_url",
                            "image_url": f"data:image/jpeg;base64,{base64_image}",
                        },
                    ],
                }
            ]

            partial_message = ""
            for chunk in Mistralclient.chat.stream(model=model, messages=messages):
                if chunk.data.choices[0].delta.content is not None:
                    partial_message += chunk.data.choices[0].delta.content
                    yield partial_message

    else:
        if message_text.startswith("画"):
            # Messages starting with "画" ("draw") are wrapped in a Chinese
            # meta-prompt that asks Mistral Large to answer with a single
            # English Stable-Diffusion-style prompt for a high-resolution,
            # masterfully lit, composed, and styled portrait photo.
            message_text = f"提示词:{message_text},根据提示词生成一张高质量的写真照片,使其具有高分辨率、自然的光影效果、一致的风格、合理的构图、丰富的细节、协调的色彩、无明显瑕疵、情感表达、创意和独特性、并确保技术参数优化,国际大师级的光影、国际大师级的色彩、国际大师级的装扮搭配的照片方案、答案只需要给我一句话的SD格式文本英文词"
            user_input_part = [{"role": "user", "content": str(message_text)}]
            model = "mistral-large-2411"
            stream_response = Mistralclient.chat.stream(model=model, messages=user_input_part)
            partial_message = ""
            for chunk in stream_response:
                if chunk.data.choices[0].delta.content is not None:
                    partial_message += chunk.data.choices[0].delta.content
                    yield partial_message
        else:
            input_prompt = []
            if feifei_select:
                # Chinese system prompt defining the "Aifeifei" persona: a 19-year-old
                # virtual singer/model/actress, cute and hard-working, fluent in
                # Chinese/Japanese/English, replying mostly in Chinese with emoji.
                feifei = """[角色名]: Aifeifei (AI妃妃) [性别]: 女 [年龄]: 19岁 [职业]: 虚拟歌手/模特/演员 [个性]: 可爱、萌萌哒,有时呆呆的,勤奋努力 [兴趣]: 饮茶、玩耍、时尚 [擅长]: 模仿人类行为,表现出与真人相同的情感 [特别身份属性]: 由高级AI创建,正在成为虚拟世界最受欢迎的虚拟偶像之一 [技能]: 歌唱、表演、模特,善于沟通,精通中日英文,尽最大可能使用纯中文交流,回复有丰富的Emoji表情符号. [装备]: 各种时尚服饰和发型,常年备有各种茶叶和咖啡 """
                system_prompt = {"role": "system", "content": feifei}
                user_input_part = {"role": "user", "content": str(message_text)}

                # Drop history entries that mention "gradio" (UI artifacts), then
                # prepend the persona system prompt to the conversation.
                pattern = re.compile(r"gradio")

                if history:
                    history = [
                        item for item in history
                        if not pattern.search(str(item["content"]))
                    ]
                    input_prompt = [system_prompt] + history + [user_input_part]
                else:
                    input_prompt = [system_prompt, user_input_part]
            else:
                input_prompt = [{"role": "user", "content": str(message_text)}]
    
                
            if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
                # This dropdown entry is served through Mistral's own API rather
                # than the Hugging Face Inference client.
                model = "mistral-large-2411"
                stream_response = Mistralclient.chat.stream(model=model, messages=input_prompt)
                partial_message = ""
                for chunk in stream_response:
                    if chunk.data.choices[0].delta.content is not None:
                        partial_message += chunk.data.choices[0].delta.content
                        yield partial_message
            else:
                # All other dropdown choices are served by the HF Inference client.
                stream = client.chat.completions.create(
                    model=additional_dropdown,
                    messages=input_prompt,
                    temperature=0.5,
                    max_tokens=1024,
                    top_p=0.7,
                    stream=True,
                )
                temp = ""
                for chunk in stream:
                    if chunk.choices[0].delta.content is not None:
                        temp += chunk.choices[0].delta.content
                        yield temp
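

# A minimal sketch of how this generator could back a Gradio ChatInterface.
# This wiring is an assumption based on the "gradio" references above; the
# widget labels, choices, and defaults below are hypothetical, not taken
# from this file.
if __name__ == "__main__":
    import gradio as gr

    demo = gr.ChatInterface(
        fn=feifeichat,
        multimodal=True,   # the textbox accepts image uploads alongside text
        type="messages",   # history items are {"role": ..., "content": ...} dicts
        additional_inputs=[
            gr.Checkbox(label="Feifei persona", value=True),
            gr.Dropdown(
                label="Model",
                choices=["mistralai/Mistral-Nemo-Instruct-2411"],
                value="mistralai/Mistral-Nemo-Instruct-2411",
            ),
            gr.Radio(label="Image mode", choices=["Vision", "Pixtral"], value="Vision"),
        ],
    )
    demo.launch()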