-
Notifications
You must be signed in to change notification settings - Fork 54
/
Copy pathollama.py
59 lines (48 loc) · 1.62 KB
/
ollama.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import requests
import base64
class Ollama:
    """Minimal HTTP client for a locally running Ollama server.

    Wraps three endpoints: /api/chat (chat completion), /api/embeddings
    (text embeddings) and /api/generate (used here for vision prompts).
    """

    def __init__(self, base_url="http://localhost:11434", timeout=120):
        """Create a client.

        base_url: address the Ollama daemon listens on (default matches a
            standard local install).
        timeout: seconds before a request is aborted; model generation can
            be slow, so the default is generous.
        """
        self.base_url = base_url
        self.timeout = timeout

    def chat(self, messages, model, temperature=0.0):
        """Send a chat request and return the assistant's reply text.

        messages: list of {"role": ..., "content": ...} dicts.
        model: name of the model to run.
        temperature: sampling temperature (0.0 = most deterministic).
        Raises requests.HTTPError on a non-2xx response.
        """
        url = f"{self.base_url}/api/chat"
        headers = {
            "Content-Type": "application/json",
        }
        data = {
            "model": model,
            "messages": messages,
            # Ollama reads sampling parameters from the nested "options"
            # object; a top-level "temperature" key is silently ignored,
            # which made the temperature argument a no-op before.
            "options": {"temperature": temperature},
            "stream": False,
        }
        response = requests.post(url, json=data, headers=headers, timeout=self.timeout)
        response.raise_for_status()
        return response.json()["message"]["content"]

    def embeddings(self, prompt, model="nomic-embed-text"):
        """Return the embedding vector (list of floats) for *prompt*.

        model: embedding model name; the default preserves the previously
            hard-coded choice.
        Raises requests.HTTPError on a non-2xx response.
        """
        url = f"{self.base_url}/api/embeddings"
        headers = {
            "Content-Type": "application/json",
        }
        data = {
            "prompt": prompt,
            "model": model,
        }
        response = requests.post(url, json=data, headers=headers, timeout=self.timeout)
        response.raise_for_status()
        return response.json()["embedding"]

    def analyze_image(self, image_path, model, prompt):
        """Run a vision-capable model over the image at *image_path*.

        The image is read from disk and sent base64-encoded, as the
        /api/generate endpoint expects. Returns the model's text response.
        Raises OSError if the file cannot be read, requests.HTTPError on a
        non-2xx response.
        """
        with open(image_path, "rb") as image_file:
            image_data = image_file.read()
        base64_image = base64.b64encode(image_data).decode("utf-8")
        url = f"{self.base_url}/api/generate"
        headers = {
            "Content-Type": "application/json",
        }
        data = {
            "model": model,
            "prompt": prompt,
            "images": [base64_image],
            "stream": False,
        }
        response = requests.post(url, json=data, headers=headers, timeout=self.timeout)
        response.raise_for_status()
        return response.json()["response"]