from openai import OpenAI
import os
import base64
import requests
from prompts import prompts
from constants import JSON_SCHEMA_FOR_GPT

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
model = "gpt-4o"
title = "Caimera Mood board Expert"

def createAssistant(instruction_prompt):
    """Create an Assistant with the given instructions and return its id."""
    assistant = client.beta.assistants.create(
        name=title,
        instructions=instruction_prompt,
        model=model
    )
    return assistant.id

def saveFileOpenAI(location):
    """Upload a local image for vision use, delete the local copy, and return the OpenAI file id."""
    with open(location, "rb") as f:
        file = client.files.create(file=f, purpose="vision")
    os.remove(location)
    return file.id

def startAssistantThread(file_id_enum, prompt_n, image_needed, json_mode_needed_or_not):
    """Create a thread seeded with the prompt (and any uploaded images) and return its id.

    The message payload is identical whether or not JSON mode is requested, so
    json_mode_needed_or_not does not change how the thread is built.
    """
    if image_needed == "yes":
        # Text part first, then one image_file part per uploaded file id.
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt_n
                    }
                ],
            }
        ]
        for file_id in file_id_enum:
            messages[0]["content"].append({
                "type": "image_file",
                "image_file": {"file_id": file_id}
            })
    else:
        messages = [
            {
                "role": "user",
                "content": prompt_n
            }
        ]
    thread = client.beta.threads.create(messages=messages)
    return thread.id

def runAssistant(thread_id, assistant_id):
    """Start a run of the assistant on the given thread and return the run id."""
    run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
    return run.id

def checkRunStatus(thread_id, run_id):
    """Return the run's current status (e.g. "queued", "in_progress", "completed")."""
    run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
    return run.status

def retrieveThread(thread_id):
    """Return the thread's messages as [{"content": ..., "role": ...}] in chronological order."""
    thread_messages = client.beta.threads.messages.list(thread_id)
    list_messages = thread_messages.data
    messages = []
    for message in list_messages:
        messages.append({
            "content": message.content[0].text.value,
            "role": message.role
        })
    # The API lists newest messages first; reverse so the conversation reads oldest to newest.
    return messages[::-1]

def addMessageToThread(thread_id, prompt_n):
    """Append a user message to an existing thread."""
    client.beta.threads.messages.create(thread_id, role="user", content=prompt_n)

def create_chat_completion_request_open_ai_for_summary(prompt, json_mode, schema_name="",
                                                       json_schema="",
                                                       system_message="You are expert in Fashion Shoots"):
    """Call the Chat Completions endpoint directly and return the message content.

    When json_mode is anything other than "No", the request asks for a strict
    JSON-schema response and uses a prompt-writing system message instead of
    the system_message argument.
    """
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}"
    }
    if json_mode == "No":
        data = {
            "model": "gpt-4o",
            "messages": [
                {"role": "system", "content": system_message},
                {"role": "user", "content": prompt}
            ]
        }
    else:
        data = {
            "model": "gpt-4o",
            "messages": [
                {"role": "system", "content": "You are expert in creating prompts for Fashion Shoots."},
                {"role": "user", "content": prompt}
            ],
            "response_format": {
                "type": "json_schema",
                "json_schema": {"name": schema_name, "strict": True, "schema": json_schema}
            }
        }
    response = requests.post(url, headers=headers, json=data)
    json_response = response.json()
    return json_response["choices"][0]["message"]["content"]

def encode_image(image_path):
    """Read an image from disk and return it as a base64-encoded string."""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

def create_image_completion_request_gpt(image_path, prompt):
    """Send a prompt plus a base64-encoded image to Chat Completions and return the reply text."""
    base64_image = encode_image(image_path)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}"
    }
    payload = {
        "model": "gpt-4o",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
    }
    response = requests.post("https://api.openai.com/v1/chat/completions",
                             headers=headers, json=payload)
    json_resp = response.json()
    return json_resp["choices"][0]["message"]["content"]
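

# A minimal usage sketch of the helpers above, guarded so it only runs when this
# module is executed directly. The instruction prompt, image path, user prompts,
# and polling interval below are illustrative placeholders, not values taken
# from this project.
if __name__ == "__main__":
    import time

    assistant_id = createAssistant("You are a mood board expert for fashion shoots.")  # placeholder instructions
    file_id = saveFileOpenAI("example.jpg")  # placeholder path; note the local file is deleted after upload
    thread_id = startAssistantThread([file_id], "Describe this mood board.", "yes", "no")
    run_id = runAssistant(thread_id, assistant_id)

    # Poll until the run reaches a terminal state, then print the conversation oldest-first.
    while checkRunStatus(thread_id, run_id) not in ("completed", "failed", "cancelled", "expired"):
        time.sleep(2)
    for message in retrieveThread(thread_id):
        print(f"{message['role']}: {message['content']}")

    # Direct chat-completion call without the Assistants API; "No" skips the JSON-schema path.
    summary = create_chat_completion_request_open_ai_for_summary(
        "Summarise the shoot concept in two sentences.", "No")
    print(summary)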