"""
This file contains tests for the API of your model. You can run these tests by installing the test requirements:
```bash
pip install -r requirements-test.txt
```
Then execute `pytest` in the directory of this file.
- Change `NewModel` to the name of the class in your model.py file.
- Change the `request` and `expected_response` variables to match the input and output of your model.
"""
import pytest
import json
import unittest.mock as mock

from model import OpenAIInteractive

# This is your mocked completion response
mocked_completion_response = {
    "choices": [
        {
            "message": {
                "content": "Mocked response"
            }
        }
    ]
}


# Lightweight stand-ins for the response objects returned by the OpenAI chat completions client.
class Message:
    def __init__(self, content):
        self.content = content


class Choice:
    def __init__(self, message):
        self.message = message


class MockedCompletionResponse:
    def __init__(self, choices):
        self.choices = choices
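

# Optional illustrative helper (not used by the tests below): it shows how the plain
# dict above maps onto the attribute-based mock objects, assuming only
# `choices[*].message.content` is read from the completion response.
def build_mocked_response(payload=mocked_completion_response):
    return MockedCompletionResponse(
        choices=[
            Choice(message=Message(content=choice["message"]["content"]))
            for choice in payload["choices"]
        ]
    )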


@pytest.fixture
def client():
    # Build the backend app around the model class and expose a test client.
    from _wsgi import init_app

    app = init_app(model_class=OpenAIInteractive)
    app.config['TESTING'] = True
    with app.test_client() as client:
        yield client
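

# A minimal sketch of how the `client` fixture can be reused for other endpoints.
# It assumes the standard label_studio_ml backend exposes a GET /health endpoint
# returning HTTP 200; drop or adjust this if your _wsgi app is wired differently.
def test_health(client):
    response = client.get('/health')
    assert response.status_code == 200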


@pytest.fixture
def mock_default_prompt():
    # Patch the class-level default prompt so the test does not depend on the shipped template.
    with mock.patch.object(OpenAIInteractive, 'DEFAULT_PROMPT', new='''\
Classify text into different categories. Start each category prediction in a new line.
Text: {text}
Categories: {labels}'''):
        yield


@pytest.fixture
def mock_not_use_internal_prompt_template():
    # Disable the model's internal prompt template for this test.
    with mock.patch.object(OpenAIInteractive, 'USE_INTERNAL_PROMPT_TEMPLATE', new=False):
        yield


def test_predict(client, mock_default_prompt, mock_not_use_internal_prompt_template):
    request = {
        'tasks': [{
            'data': {
                'text': 'Long text to be classified into one of the topics.'
            }
        }],
        # Your labeling configuration here
        'label_config':
            '<View>'
            '<Text name="text" value="$text" />'
            '<TextArea name="prompt" toName="text" required="true" />'
            '<Choices name="topic" toName="text" choice="single">'
            '<Choice value="sports" />'
            '<Choice value="politics" />'
            '<Choice value="technology" />'
            '</Choices>'
            '</View>'
    }

    expected_response_results = [{
        # In the current implementation, predictions go first, then the prompt is
        # appended at the end to populate the text area.
        'result': [{
            'from_name': 'topic',
            'to_name': 'text',
            'type': 'choices',
            'value': {'choices': ['technology']}
        }, {
            'from_name': 'prompt',
            'to_name': 'text',
            'type': 'textarea',
            'value': {
                'text': [
                    'Classify text into different categories. Start each category prediction in a new line.\nText: {text}\nCategories: {labels}'
                ]
            }
        }]
    }]

    def mock_create(**kwargs):
        # Check the prompt and parameters that the model passes to the OpenAI client.
        assert kwargs["messages"][0]["content"] == '''\
Classify text into different categories. Start each category prediction in a new line.
Text: Long text to be classified into one of the topics.
Categories: ['sports', 'politics', 'technology']'''
        assert kwargs["model"] == "gpt-3.5-turbo"
        assert kwargs['n'] == 1
        assert kwargs['temperature'] == 0.7
        return MockedCompletionResponse(
            choices=[
                Choice(message=Message(content="tech"))
            ]
        )

    with mock.patch('openai.resources.chat.completions.Completions.create', side_effect=mock_create) as mock_chat:
        response = client.post('/predict', data=json.dumps(request), content_type='application/json')

        # ensure mock was called
        mock_chat.assert_called_once()

    assert response.status_code == 200
    response = json.loads(response.data)

    assert len(response['results']) == len(expected_response_results)
    assert len(response['results'][0]['result']) == len(expected_response_results[0]['result'])
    for actual_item, expected_item in zip(response['results'][0]['result'],
                                          expected_response_results[0]['result']):
        for key in ('from_name', 'to_name', 'type', 'value'):
            assert actual_item[key] == expected_item[key]


# TODO: add tests for interactive modes, choices and textareas
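

# A placeholder sketch for the TODO above. The exact request payload and expected
# results for interactive mode depend on your labeling configuration and model, so
# this skeleton (hypothetical structure only) is skipped until it is filled in.
@pytest.mark.skip(reason="TODO: cover interactive mode, choices and textareas")
def test_predict_interactive_mode(client, mock_default_prompt, mock_not_use_internal_prompt_template):
    # 1. Build a request with a task, an annotation/draft context, and a label_config
    #    containing the interactive TextArea.
    # 2. Mock openai.resources.chat.completions.Completions.create as in test_predict.
    # 3. POST the request to /predict and assert on the returned choices and textarea results.
    ...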