Spaces:
Running
Running
Update main.py
Browse files
main.py
CHANGED
@@ -8,6 +8,11 @@ from pillmodel import get_prediction
|
|
8 |
import base64
|
9 |
from fastapi.staticfiles import StaticFiles
|
10 |
import os
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
from inference_sdk import InferenceHTTPClient
|
13 |
|
@@ -102,4 +107,77 @@ app.mount("/", StaticFiles(directory="static"), name="static")
|
|
102 |
|
103 |
@app.get("/")
async def home():
    """Root route: bounce the browser to the static index page."""
    # Meta-refresh payload is kept exactly as the original implementation.
    page = "<html><head><meta http-equiv='refresh' content='0; url=/index.html'></head></html>"
    return HTMLResponse(content=page)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
import base64
|
9 |
from fastapi.staticfiles import StaticFiles
|
10 |
import os
|
11 |
+
import google.generativeai as genai
|
12 |
+
from google.generativeai.types import HarmCategory, HarmBlockThreshold
|
13 |
+
import google.ai.generativelanguage as glm
|
14 |
+
from PIL import Image
|
15 |
+
import io
|
16 |
|
17 |
from inference_sdk import InferenceHTTPClient
|
18 |
|
|
|
107 |
|
108 |
@app.get("/")
async def home():
    """Serve the site root by redirecting the browser to the static index page.

    NOTE(review): StaticFiles appears to be mounted at "/" as well (see the
    hunk header); presumably the mount/route ordering keeps this handler
    reachable — verify against the full file.
    """
    # Client-side redirect via a zero-delay meta-refresh page; the HTML
    # payload is reproduced byte-for-byte from the original one-liner.
    redirect_page = "<html><head><meta http-equiv='refresh' content='0; url=/index.html'></head></html>"
    return HTMLResponse(content=redirect_page)
|
111 |
+
|
112 |
+
|
113 |
+
|
114 |
+
# SECURITY FIX: the original committed a literal Google API key
# ("AIzaSy...") to source control.  A key that has appeared in a public
# diff must be revoked; read the replacement from the environment so the
# secret never lands in the repository again.
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", ""))

# Generation parameters applied to every Gemini request below.
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 8192,
    # NOTE(review): the /analyze-image prompt asks the model for JSON only,
    # yet the declared mime type is text/plain — consider
    # "response_mime_type": "application/json" for parseable output.
    "response_mime_type": "text/plain",
}

# All built-in content filters are disabled, presumably because ordinary
# product photos were being over-blocked — TODO confirm this is intentional.
safety_settings = {
    HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
    HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
    HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
    HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
}
|
130 |
+
|
131 |
+
def process_image(file: UploadFile):
    """Decode an uploaded image and repackage it as a JPEG protobuf Blob.

    The upload is opened with Pillow, normalised to RGB (JPEG cannot store
    modes such as RGBA or P), re-encoded as JPEG in memory, and wrapped in
    a ``glm.Blob`` ready to be sent to the Gemini API.
    """
    picture = Image.open(file.file)

    # JPEG output requires RGB; convert anything else (RGBA, P, L, ...).
    if picture.mode != 'RGB':
        picture = picture.convert('RGB')

    # Re-encode entirely in memory — no temporary file on disk.
    buffer = io.BytesIO()
    picture.save(buffer, format='JPEG')

    return glm.Blob(mime_type='image/jpeg', data=buffer.getvalue())
|
149 |
+
|
150 |
+
@app.post("/analyze-image")
async def analyze_image(file: UploadFile = File(...)):
    """Moderate an uploaded product photo with Gemini.

    The image is re-encoded by ``process_image`` and sent, together with a
    fixed moderation prompt, to the Gemini model.  The raw model text
    (expected to be a JSON document) is returned under the ``result`` key.
    """
    # Normalise the upload into a protobuf Blob the SDK accepts.
    image_blob = process_image(file)

    # Fixed moderation prompt; the model is instructed to reply with JSON only.
    prompt = """
    give a safety score for a website called unipall which is a olx, now when a user is uploading a product,
    tell me this in json like:
    only give this json nothing else not be too harmful
    when a picture contains some accessories in a scene focus on them and don't flag it
    don't flag text on the product
    {
    useable_on_website: true/false,
    safety_score: /100,
    category: "",
    reason: "",
    suggested_product_title: "",
    suggested_product_description: ""
    }
    """

    # Client for the experimental 1.5-pro checkpoint, using the
    # module-level generation and safety settings.
    gemini = genai.GenerativeModel(
        model_name="gemini-1.5-pro-exp-0827",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )

    # Single multimodal request: prompt text plus the image blob.
    ai_response = gemini.generate_content([prompt, image_blob])

    # NOTE(review): ``.text`` raises if the reply was blocked by safety
    # filters — presumably acceptable here, but verify error handling.
    return {"result": ai_response.text}
|