Upload 8 files
- config.py +0 -0
- feifeilib/feifeichat.py +22 -16
- feifeilib/feifeiflorence.py +13 -13
- feifeilib/feifeiimgtoimg.py +1 -0
- feifeilib/feifeitexttoimg.py +31 -35
- feifeiui/feifeiui.py +19 -17
config.py
CHANGED
The diff for this file is too large to render.
See raw diff
feifeilib/feifeichat.py
CHANGED
@@ -45,23 +45,26 @@ def feifeichat(message, history, feifei_select):
         # Specify model
         model = "pixtral-large-2411"
         # Define the messages for the chat
-        messages = [
-
-
-
-
-
-
-
-
-
-
-
+        messages = [{
+            "role":
+            "user",
+            "content": [
+                {
+                    "type": "text",
+                    "text": message_text
+                },
+                {
+                    "type": "image_url",
+                    "image_url": f"data:image/jpeg;base64,{base64_image}",
+                },
+            ],
+        }]
 
         partial_message = ""
         for chunk in client.chat.stream(model=model, messages=messages):
             if chunk.data.choices[0].delta.content is not None:
-                partial_message = partial_message + chunk.data.choices[
+                partial_message = partial_message + chunk.data.choices[
+                    0].delta.content
                 yield partial_message
 
     else:
@@ -76,7 +79,8 @@ def feifeichat(message, history, feifei_select):
 
         if history:
             history = [
-                item for item in history
+                item for item in history
+                if not pattern.search(str(item["content"]))
            ]
            # print(history)
            input_prompt = [system_prompt] + history + [user_input_part]
@@ -84,10 +88,12 @@ def feifeichat(message, history, feifei_select):
            input_prompt = [system_prompt] + [user_input_part]
        else:
            input_prompt = [{"role": "user", "content": str(message)}]
-        stream_response = client.chat.stream(model=model,
+        stream_response = client.chat.stream(model=model,
+                                             messages=input_prompt)
 
        partial_message = ""
        for chunk in stream_response:
            if chunk.data.choices[0].delta.content is not None:
-                partial_message = partial_message + chunk.data.choices[
+                partial_message = partial_message + chunk.data.choices[
+                    0].delta.content
                yield partial_message
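In the new code, `messages` follows Mistral's multimodal chat format: the image travels inline as a base64-encoded data URL next to the text part. Below is a minimal, self-contained sketch of the same call pattern, assuming the `mistralai` Python client, a `MISTRAL_API_KEY` environment variable, and a placeholder input file; only the `client.chat.stream(...)` usage and the message shape are taken from the diff itself.

import base64
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Placeholder input; the Space derives base64_image from the uploaded file.
with open("photo.jpg", "rb") as f:
    base64_image = base64.b64encode(f.read()).decode("utf-8")

messages = [{
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url",
         "image_url": f"data:image/jpeg;base64,{base64_image}"},
    ],
}]

# Stream the reply and accumulate it, mirroring feifeichat's generator:
# yielding the growing string is what lets the chat UI render token by token.
partial_message = ""
for chunk in client.chat.stream(model="pixtral-large-2411", messages=messages):
    delta = chunk.data.choices[0].delta.content
    if delta is not None:
        partial_message += delta
        print(delta, end="", flush=True)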
feifeilib/feifeiflorence.py
CHANGED
@@ -18,18 +18,18 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 
 Florence_models = AutoModelForCausalLM.from_pretrained(
-    "microsoft/Florence-2-large",
-
+    "microsoft/Florence-2-large",
+    torch_dtype=torch_dtype,
+    trust_remote_code=True).to(device)
 
 Florence_processors = AutoProcessor.from_pretrained(
-    "microsoft/Florence-2-large", trust_remote_code=True
-)
+    "microsoft/Florence-2-large", trust_remote_code=True)
 
 
 @spaces.GPU
 def feifeiflorence(
-
-
+        image,
+        progress=gr.Progress(track_tqdm=True),
 ):
     image = Image.fromarray(image)
     task_prompt = "<MORE_DETAILED_CAPTION>"
@@ -37,9 +37,9 @@ def feifeiflorence(
     if image.mode != "RGB":
         image = image.convert("RGB")
 
-    inputs = Florence_processors(
-
-
+    inputs = Florence_processors(text=task_prompt,
+                                 images=image,
+                                 return_tensors="pt").to(device, torch_dtype)
 
     generated_ids = Florence_models.generate(
         input_ids=inputs["input_ids"],
@@ -49,9 +49,9 @@ def feifeiflorence(
         do_sample=False,
     )
     generated_text = Florence_processors.batch_decode(
-        generated_ids, skip_special_tokens=False
-    )[0]
+        generated_ids, skip_special_tokens=False)[0]
     parsed_answer = Florence_processors.post_process_generation(
-        generated_text,
-
+        generated_text,
+        task=task_prompt,
+        image_size=(image.width, image.height))
     return parsed_answer["<MORE_DETAILED_CAPTION>"]
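The refactor also pins down the Florence-2 captioning path: load with `trust_remote_code=True`, run the `<MORE_DETAILED_CAPTION>` task, decode, then post-process. A standalone sketch of that path; the image path is a placeholder, and the `pixel_values`/`max_new_tokens` arguments follow the model card's published example rather than the lines visible in this diff.

import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Florence-2-large",
    torch_dtype=torch_dtype,
    trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained(
    "microsoft/Florence-2-large", trust_remote_code=True)

task_prompt = "<MORE_DETAILED_CAPTION>"
image = Image.open("photo.jpg").convert("RGB")  # placeholder input

inputs = processor(text=task_prompt, images=image,
                   return_tensors="pt").to(device, torch_dtype)
generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    pixel_values=inputs["pixel_values"],  # assumption: per the model card
    max_new_tokens=1024,                  # assumption: per the model card
    do_sample=False,
)
generated_text = processor.batch_decode(
    generated_ids, skip_special_tokens=False)[0]
parsed_answer = processor.post_process_generation(
    generated_text,
    task=task_prompt,
    image_size=(image.width, image.height))
print(parsed_answer[task_prompt])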
feifeilib/feifeiimgtoimg.py
CHANGED
@@ -4,5 +4,6 @@ import numpy as np
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 4096
 
+
 def feifeiimgtoimg(img_in_result, prompt):
     return img_in_result
feifeilib/feifeitexttoimg.py
CHANGED
@@ -23,19 +23,19 @@ artists = [artist.strip() for artist in artists]
 
 @spaces.GPU()
 def feifeitexttoimg(
-
-
-
-
-
-
-
-
-
-
-
-
-
+        prompt,
+        quality_select,
+        sharpened_select,
+        styles_Radio,
+        FooocusExpansion_select,
+        seed=42,
+        randomize_seed=False,
+        width=1024,
+        height=1024,
+        num_inference_steps=4,
+        guidance_scale=3.5,
+        num_strength=0.35,
+        progress=gr.Progress(track_tqdm=True),
 ):
     guidance_scale = 3.5
     if randomize_seed:
@@ -85,41 +85,37 @@ def feifeitexttoimg(
     image_np = np.array(image)
 
     # Convert the NumPy array to a PyTorch tensor
-    image_tensor = (
-
-    )
+    image_tensor = (torch.tensor(image_np).permute(
+        2, 0, 1).unsqueeze(0).float().to("cuda"))
 
     # Define the sharpening kernel and adjust its center value
     strength = num_strength
-    sharpen_kernel = (
-
-        [
-
-
-
-
-
-        )
-        .unsqueeze(0)
-        .unsqueeze(0)
-        .to("cuda")
-    )
+    sharpen_kernel = (torch.tensor(
+        [
+            [0, -1 * strength, 0],
+            [-1 * strength, 1 + 4 * strength, -1 * strength],
+            [0, -1 * strength, 0],
+        ],
+        dtype=torch.float32,
+    ).unsqueeze(0).unsqueeze(0).to("cuda"))
 
     # Apply the convolution kernel to each channel separately
     sharpened_channels = []
     for i in range(3):
-        channel_tensor = image_tensor[:, i
-        sharpened_channel = F.conv2d(channel_tensor,
+        channel_tensor = image_tensor[:, i:i + 1, :, :]
+        sharpened_channel = F.conv2d(channel_tensor,
+                                     sharpen_kernel,
+                                     padding=1)
         sharpened_channels.append(sharpened_channel)
 
     # Merge the channels
     sharpened_image_tensor = torch.cat(sharpened_channels, dim=1)
 
     # Convert the enhanced image back to PIL format
-    sharpened_image_np = (
-
-
-
+    sharpened_image_np = (sharpened_image_tensor.squeeze(0).permute(
+        1, 2, 0).cpu().numpy())
+    sharpened_image_np = np.clip(sharpened_image_np, 0,
+                                 255).astype(np.uint8)
     image = Image.fromarray(sharpened_image_np)
 
     return image, seed
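The sharpening block is worth unpacking: the 3x3 kernel has center 1 + 4*strength and cross neighbours -strength, so its entries sum to 1. Flat regions pass through unchanged while edges are amplified, i.e. the result is the image plus a scaled Laplacian (classic unsharp masking). A CPU-only sketch of the same math, with a placeholder input path:

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image


def sharpen(image: Image.Image, strength: float = 0.35) -> Image.Image:
    # HWC uint8 array -> NCHW float tensor.
    t = torch.tensor(np.array(image)).permute(2, 0, 1).unsqueeze(0).float()

    # Kernel sums to 1: identity plus a scaled negative Laplacian.
    k = torch.tensor(
        [[0, -strength, 0],
         [-strength, 1 + 4 * strength, -strength],
         [0, -strength, 0]],
        dtype=torch.float32).unsqueeze(0).unsqueeze(0)

    # Convolve each colour channel independently with the same kernel.
    channels = [F.conv2d(t[:, i:i + 1], k, padding=1) for i in range(3)]
    out = torch.cat(channels, dim=1).squeeze(0).permute(1, 2, 0).numpy()
    return Image.fromarray(np.clip(out, 0, 255).astype(np.uint8))


sharpened = sharpen(Image.open("photo.jpg").convert("RGB"))  # placeholder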
feifeiui/feifeiui.py
CHANGED
@@ -31,12 +31,11 @@ def create_ui():
                 container=False,
             )
             run_button = gr.Button("Run")
-            result = gr.Image(
-
-
+            result = gr.Image(label="Result",
+                              show_label=False,
+                              interactive=False)
 
             with gr.Accordion("Advanced Settings", open=False):
-
                 seed = gr.Slider(
                     label="Seed",
                     minimum=0,
@@ -45,10 +44,10 @@ def create_ui():
                     value=0,
                 )
 
-                randomize_seed = gr.Checkbox(label="Randomize seed",
+                randomize_seed = gr.Checkbox(label="Randomize seed",
+                                             value=True)
 
                 with gr.Row():
-
                     width = gr.Slider(
                         label="Width",
                         minimum=256,
@@ -97,23 +96,26 @@ def create_ui():
                     container=False,
                 )
                 img_run_button = gr.Button("Img2Img")
-                img_out_result = gr.Image(
-
-
+                img_out_result = gr.Image(label="Result",
+                                          show_label=False,
+                                          interactive=False)
         with gr.Tab("Styles"):
             quality_select = gr.Checkbox(label="high quality")
-            sharpened_select = gr.Checkbox(label="Sharpened"
+            sharpened_select = gr.Checkbox(label="Sharpened",
+                                           value=True)
             FooocusExpansion_select = gr.Checkbox(
-                label="FooocusExpansion", value=True
-
-
-
-
-
+                label="FooocusExpansion", value=True)
+            styles_name = [
+                style["name"] for style in config.style_list
+            ]
+            styles_Radio = gr.Dropdown(styles_name,
+                                       label="Styles",
+                                       multiselect=True)
         with gr.Tab("Florence-2"):
             with gr.Row():
                 with gr.Column():
-                    output_text = gr.Textbox(label="Output Text",
+                    output_text = gr.Textbox(label="Output Text",
+                                             container=False)
                     florence_btn = gr.Button(value="Florence")
                 with gr.Column():
                     input_img = gr.Image(label="Input Picture")
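For orientation, components like these are wired to handlers with `.click()` inside the same `gr.Blocks` context. A minimal sketch with a stand-in handler and stand-in style names, not the Space's actual pipeline:

import gradio as gr


def fake_generate(prompt, sharpened, styles):
    # Stand-in for the real text-to-image handler.
    return f"{prompt} | sharpened={sharpened} | styles={styles}"


with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt", container=False)
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result", show_label=False)
    sharpened_select = gr.Checkbox(label="Sharpened", value=True)
    styles_Radio = gr.Dropdown(["cinematic", "anime"],  # stand-in names
                               label="Styles", multiselect=True)
    run_button.click(fn=fake_generate,
                     inputs=[prompt, sharpened_select, styles_Radio],
                     outputs=result)

demo.launch()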