File size: 7,913 Bytes
90f868b
 
 
 
 
 
 
 
 
328507f
90f868b
 
 
 
 
 
fc7abe3
90f868b
 
0a6504e
 
fc7abe3
 
213b879
fc7abe3
 
90f868b
 
 
 
 
 
 
 
fc7abe3
90f868b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6b63447
140c980
492c74b
647d977
 
 
98625d2
647d977
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
492c74b
 
 
6e1fc84
492c74b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90f868b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
import base64
from io import BytesIO
import os
from mistralai import Mistral
import re
from PIL import Image
from huggingface_hub import InferenceClient

# Hugging Face Inference API client; token comes from the HF_TOKEN env var.
client = InferenceClient(api_key=os.getenv('HF_TOKEN'))
# Disable HF's response cache so repeated prompts still hit the model.
client.headers["x-use-cache"] = "0"
# Mistral API client; key comes from the MISTRAL_API_KEY env var.
api_key = os.getenv("MISTRAL_API_KEY")
Mistralclient = Mistral(api_key=api_key)

def encode_image(image_path):
    """Encode the image at *image_path* as a base64 JPEG string.

    The image is converted to RGB, resized to a fixed height of 512 px
    while preserving the aspect ratio, re-encoded as an in-memory JPEG,
    and returned as a UTF-8 base64 string suitable for embedding in a
    ``data:image/jpeg;base64,...`` URL.

    Returns:
        str | None: The base64-encoded JPEG, or ``None`` (after printing
        the error) if the file is missing or any processing step fails.
    """
    try:
        # Use a context manager so the underlying file handle is closed
        # deterministically instead of waiting for garbage collection.
        # convert("RGB") forces a full load, so the data survives close.
        with Image.open(image_path) as opened:
            image = opened.convert("RGB")

        # Resize to a height of 512 while maintaining the aspect ratio.
        base_height = 512
        h_percent = base_height / float(image.size[1])
        w_size = int(float(image.size[0]) * h_percent)
        image = image.resize((w_size, base_height), Image.LANCZOS)

        # Serialize the resized image to an in-memory JPEG byte stream.
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # Best-effort helper: report and signal failure with None.
        print(f"Error: {e}")
        return None

def _mistral_stream(model, messages):
    """Yield progressively longer partial replies from a Mistral chat stream."""
    partial_message = ""
    for chunk in Mistralclient.chat.stream(model=model, messages=messages):
        if chunk.data.choices[0].delta.content is not None:
            partial_message += chunk.data.choices[0].delta.content
            yield partial_message

def _hf_stream(model, messages, **options):
    """Yield progressively longer partial replies from a Hugging Face chat stream.

    ``options`` is forwarded to ``chat.completions.create`` (e.g.
    ``max_tokens``, ``temperature``, ``top_p``).
    """
    stream = client.chat.completions.create(
        model=model,
        messages=messages,
        stream=True,
        **options,
    )
    # Fix: the original Vision branch accumulated into `temp` without ever
    # initializing it, raising UnboundLocalError on the first chunk.
    partial_message = ""
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            partial_message += chunk.choices[0].delta.content
            yield partial_message

def feifeichat(message, history, feifei_select, additional_dropdown, image_mod):
    """Stream a chat reply for a Gradio-style multimodal chat callback.

    Args:
        message: Multimodal message dict with ``"text"`` and ``"files"`` keys.
        history: Prior turns as ``{"role", "content"}`` dicts, or empty/None.
        feifei_select: When truthy, prepend the "Aifeifei" persona system prompt.
        additional_dropdown: Selected text model id; the Mistral-Nemo id is
            routed to the Mistral API, anything else to the HF Inference API.
        image_mod: ``"Vision"`` routes images to the Llama vision model on HF;
            anything else uses Mistral's Pixtral model.

    Yields:
        str: The accumulated partial reply, re-yielded as each chunk arrives.
    """
    message_text = message.get("text", "")
    message_files = message.get("files", [])

    if message_files:
        # Image path: only the first attached file is used.
        base64_image = encode_image(message_files[0])
        # NOTE(review): base64_image may be None if encoding failed; the
        # request would then carry a "data:...base64,None" URL — confirm
        # whether an early error message should be yielded instead.
        if image_mod == "Vision":
            messages = [{
                "role": "user",
                "content": [
                    {"type": "text", "text": message_text},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        },
                    },
                ],
            }]
            yield from _hf_stream(
                "meta-llama/Llama-3.2-11B-Vision-Instruct",
                messages,
                max_tokens=500,
            )
        else:
            # Mistral's vision message format takes the data URL directly
            # as the "image_url" value (no nested {"url": ...} object).
            messages = [{
                "role": "user",
                "content": [
                    {"type": "text", "text": message_text},
                    {
                        "type": "image_url",
                        "image_url": f"data:image/jpeg;base64,{base64_image}",
                    },
                ],
            }]
            yield from _mistral_stream("pixtral-large-2411", messages)
    elif message_text.startswith("画") or message_text.startswith("draw"):
        # Drawing request: wrap the user's text in a prompt-engineering
        # instruction asking for a one-line English SD-style prompt.
        message_text = f"提示词是'{message_text}',根据提示词帮我生成一张高质量的写真照片的英文描述,并且根据照片提示词帮我补充对应以下内容,包括'高分辨率、自然的光影效果、一致的风格、合理的构图、丰富的细节、协调的色彩、性感的身材、迷人的动作、无明显瑕疵、情感表达、创意和独特性、并确保技术参数优化,国际大师级的光影、国际大师级的色彩、国际大师级的装扮搭配的照片方案',答案只需要给我总结到一句话的SD格式文本英文词的英文示例,英文回复"
        user_input_part = [{"role": "user", "content": str(message_text)}]
        if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
            yield from _mistral_stream("mistral-large-2411", user_input_part)
        else:
            yield from _hf_stream(
                additional_dropdown,
                user_input_part,
                temperature=0.5,
                max_tokens=1024,
                top_p=0.7,
            )
    else:
        # Plain text chat, optionally with the persona system prompt.
        # NOTE(review): these branches send str(message) — the whole dict,
        # including the "files" key — rather than message_text; preserved
        # as-is, but confirm this is intentional.
        if feifei_select:
            feifei = """[Character Name]: Aifeifei (AI Feifei) [Gender]: Female [Age]: 19 years old [Occupation]: Virtual Singer/Model/Actress [Personality]: Cute, adorable, sometimes silly, hardworking [Interests]: Drinking tea, playing, fashion [Proficient in]: Mimicking human behavior, expressing emotions similar to real humans [Special Identity Attribute]: Created by advanced AI, becoming one of the most popular virtual idols in the virtual world [Skills]: Singing, performing, modeling, good at communication, proficient in Chinese, Japanese, and English, uses the user's input language as much as possible, replies with rich Emoji symbols. [Equipment]: Various fashionable outfits and hairstyles, always stocked with various teas and coffee [Identity]: User's virtual girlfriend"""
            system_prompt = {"role": "system", "content": feifei}
            user_input_part = {"role": "user", "content": str(message)}

            # Drop any prior turns whose content mentions "gradio"
            # (filters out framework/tool chatter from the context).
            pattern = re.compile(r"gradio")
            if history:
                history = [
                    item for item in history
                    if not pattern.search(str(item["content"]))
                ]
                input_prompt = [system_prompt] + history + [user_input_part]
            else:
                input_prompt = [system_prompt] + [user_input_part]
        else:
            input_prompt = [{"role": "user", "content": str(message)}]

        if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
            yield from _mistral_stream("mistral-large-2411", input_prompt)
        else:
            yield from _hf_stream(
                additional_dropdown,
                input_prompt,
                temperature=0.5,
                max_tokens=1024,
                top_p=0.7,
            )