Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin' into doc-openaichat
Browse files Browse the repository at this point in the history
  • Loading branch information
MiriamScharnke committed Aug 6, 2024
2 parents 3da3ae6 + 2cbda4d commit c357e75
Show file tree
Hide file tree
Showing 33 changed files with 198 additions and 30 deletions.
2 changes: 1 addition & 1 deletion +llms/+internal/callAzureChatAPI.m
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@

parameters = buildParametersCall(messages, functions, nvp);

[response, streamedText] = llms.internal.sendRequest(parameters,nvp.APIKey, URL, nvp.TimeOut, nvp.StreamFun);
[response, streamedText] = llms.internal.sendRequestWrapper(parameters,nvp.APIKey, URL, nvp.TimeOut, nvp.StreamFun);

% If call errors, "choices" will not be part of response.Body.Data, instead
% we get response.Body.Data.error
Expand Down
2 changes: 1 addition & 1 deletion +llms/+internal/callOllamaChatAPI.m
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@

parameters = buildParametersCall(model, messages, nvp);

[response, streamedText] = llms.internal.sendRequest(parameters,[],URL,nvp.TimeOut,nvp.StreamFun);
[response, streamedText] = llms.internal.sendRequestWrapper(parameters,[],URL,nvp.TimeOut,nvp.StreamFun);

% If call errors, "choices" will not be part of response.Body.Data, instead
% we get response.Body.Data.error
Expand Down
2 changes: 1 addition & 1 deletion +llms/+internal/callOpenAIChatAPI.m
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@

parameters = buildParametersCall(messages, functions, nvp);

[response, streamedText] = llms.internal.sendRequest(parameters,nvp.APIKey, END_POINT, nvp.TimeOut, nvp.StreamFun);
[response, streamedText] = llms.internal.sendRequestWrapper(parameters,nvp.APIKey, END_POINT, nvp.TimeOut, nvp.StreamFun);

% If call errors, "choices" will not be part of response.Body.Data, instead
% we get response.Body.Data.error
Expand Down
5 changes: 5 additions & 0 deletions +llms/+internal/sendRequestWrapper.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
function [response, streamedText] = sendRequestWrapper(varargin)
% This function is undocumented and will change in a future release

% A wrapper around sendRequest to have a test seam: production code calls
% this pass-through, and the tests shadow it on the path with the
% recording/replaying doubles under tests/private/*-doubles.
[response, streamedText] = llms.internal.sendRequest(varargin{:});
6 changes: 6 additions & 0 deletions +llms/+internal/textGenerator.m
Original file line number Diff line number Diff line change
Expand Up @@ -28,4 +28,10 @@
properties (Access=protected)
StreamFun
end

methods
function hObj = set.StopSequences(hObj,value)
    % Normalize StopSequences on assignment so char vectors and
    % cellstr inputs are always stored as a string array.
    hObj.StopSequences = string(value);
end
end
end
3 changes: 2 additions & 1 deletion +llms/+stream/responseStreamer.m
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,8 @@
end
this.StreamFun('');
this.ResponseText = txt;
else
elseif isfield(json.choices,"delta") && ...
isfield(json.choices.delta,"content")
txt = json.choices.delta.content;
this.StreamFun(txt);
this.ResponseText = [this.ResponseText txt];
Expand Down
1 change: 1 addition & 0 deletions +llms/+utils/mustBeValidStop.m
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ function mustBeValidStop(value)
if ~isempty(value)
mustBeVector(value);
mustBeNonzeroLengthText(value);
value = string(value);
% This restriction is set by the OpenAI API
if numel(value)>4
error("llms:stopSequencesMustHaveMax4Elements", llms.utils.errorMessageCatalog.getMessage("llms:stopSequencesMustHaveMax4Elements"));
Expand Down
4 changes: 4 additions & 0 deletions .github/CODEOWNERS
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Code owners, to get auto-filled reviewer lists

# To start with, we just assume everyone in the core team is included on all reviews
* @adulai @ccreutzi @debymf @MiriamScharnke @vpapanasta
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ jobs:
- name: Pull models
run: |
ollama pull mistral
ollama pull bakllava
ollama pull moondream
OLLAMA_HOST=127.0.0.1:11435 ollama pull qwen2:0.5b
- name: Set up MATLAB
uses: matlab-actions/setup-matlab@v2
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
*.env
*.asv
*.mat
!tests/recordings/*.mat
startup.m
papers_to_read.csv
data/*
Expand Down
2 changes: 1 addition & 1 deletion extractOpenAIEmbeddings.m
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@
end


response = llms.internal.sendRequest(parameters,key, END_POINT, nvp.TimeOut);
response = llms.internal.sendRequestWrapper(parameters,key, END_POINT, nvp.TimeOut);

if isfield(response.Body.Data, "data")
emb = [response.Body.Data.data.embedding];
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
function [response, streamedText] = sendRequestWrapper(parameters, token, varargin)
% This function is undocumented and will change in a future release

% A wrapper around sendRequest to have a test seam. This is the RECORDING
% double: it forwards every call to the real llms.internal.sendRequest and
% captures (parameters, response, streaming callback invocations, streamed
% text) so they can be saved to a MAT-file and later replayed by the
% matching replaying double.
%
% Control calls:
%   sendRequestWrapper("open", name)  - remember "name" as the recording file
%   sendRequestWrapper("close")       - save captured calls to name + ".mat"
%                                       and reset the capture buffer

% Captured calls, one row per request: {inputs, outputs}.
persistent seenCalls
if isempty(seenCalls)
    seenCalls = cell(0,2);
end

% Base name (without extension) of the MAT-file to save to, set by "open".
persistent filename

if nargin == 1 && isequal(parameters,"close")
    % Persist the capture buffer; "seenCalls" is saved by name from this
    % function's workspace.
    save(filename+".mat","seenCalls");
    seenCalls = cell(0,2);
    return
end

if nargin==2 && isequal(parameters,"open")
    filename = token;
    return
end

% If the caller passed a streaming callback (5th positional argument, cf.
% llms.internal.sendRequest), wrap it so each invocation is recorded before
% being forwarded to the real callback.
streamFunCalls = {};
hasCallback = nargin >= 5 && isa(varargin{3},'function_handle');
if hasCallback
    streamFun = varargin{3};
end
function wrappedStreamFun(varargin)
    % NOTE(review): assumes the stream callback is invoked with exactly one
    % argument per call — the replaying double's cellfun replay relies on
    % the same assumption.
    streamFunCalls(end+1) = varargin;
    streamFun(varargin{:});
end
if hasCallback
    varargin{3} = @wrappedStreamFun;
end


[response, streamedText] = llms.internal.sendRequest(parameters, token, varargin{:});

% Append this interaction to the capture buffer in call order.
seenCalls(end+1,:) = {{parameters},{response,streamFunCalls,streamedText}};
end
2 changes: 2 additions & 0 deletions tests/private/recording-doubles/addpath.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
function addpath(~)
% Shadow the built-in addpath with a no-op so that addpath calls inside the
% example scripts do not disturb the test path setup.
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
function [response, streamedText] = sendRequestWrapper(parameters, token, varargin)
% This function is undocumented and will change in a future release

% A wrapper around sendRequest to have a test seam. This is the REPLAYING
% double: instead of contacting a server, it pops the oldest interaction
% recorded by the recording double and returns its captured response,
% replaying any recorded streaming callback invocations.
%
% Control calls:
%   sendRequestWrapper("open", name)  - load recordings from name + ".mat"
%   sendRequestWrapper("close")       - discard any remaining recordings

% Queue of recorded calls, one row per request: {inputs, outputs}.
persistent seenCalls
if isempty(seenCalls)
    seenCalls = cell(0,2);
end

if nargin == 1 && isequal(parameters,"close")
    seenCalls = cell(0,2);
    return
end

if nargin==2 && isequal(parameters,"open")
    load(token+".mat","seenCalls");
    return
end

% Fail with an actionable message when no recording is left, instead of the
% generic "index exceeds array bounds" error from seenCalls{1,2}. This
% happens when the recording file is missing, stale, or has fewer calls
% than the example now makes; see tests/recordings/README.md.
if isempty(seenCalls)
    error("llms:internal:sendRequestWrapper:noRecording", ...
        "No recorded server interaction left to replay. " + ...
        "Re-generate the recordings (see tests/recordings/README.md).");
end

% Oldest recorded interaction: {response, streamFunCalls, streamedText}.
result = seenCalls{1,2};
response = result{1};
streamFunCalls = result{2};
streamedText = result{3};

% If the caller passed a streaming callback (5th positional argument),
% replay the recorded stream chunks through it, one call per chunk.
if nargin >= 5 && isa(varargin{3},'function_handle')
    streamFun = varargin{3};
    cellfun(streamFun, streamFunCalls);
end

% Consume the replayed entry.
seenCalls(1,:) = [];
2 changes: 2 additions & 0 deletions tests/private/replaying-doubles/addpath.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
function addpath(~)
% Shadow the built-in addpath with a no-op so that addpath calls inside the
% example scripts do not disturb the test path setup.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added tests/recordings/CreateSimpleChatBot.mat
Binary file not shown.
Binary file added tests/recordings/CreateSimpleOllamaChatBot.mat
Binary file not shown.
Binary file added tests/recordings/DescribeImagesUsingChatGPT.mat
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
12 changes: 12 additions & 0 deletions tests/recordings/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Test Double Recordings

Testing the examples typically takes a long time and fails spuriously relatively often, mostly due to timeout errors when contacting the servers.

The point of testing the examples is not to test that we can connect to the servers. We have other test points for that. Hence, we insert a “test double” while testing the examples that keeps recordings of previous interactions with the servers and just replays the responses.

This directory contains those recordings.

## Generating Recordings

To generate or re-generate recordings (e.g., after changing an example, or making relevant software changes), open [`texampleTests.m`](../texampleTests.m) and in `setUpAndTearDowns`, change `capture = false;` to `capture = true;`. Then, run the test points relevant to the example(s) in question, and change `capture` back to `false`.

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added tests/recordings/UsingDALLEToEditImages.mat
Binary file not shown.
Binary file added tests/recordings/UsingDALLEToGenerateImages.mat
Binary file not shown.
69 changes: 55 additions & 14 deletions tests/texampleTests.m
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,25 @@
ChatBotExample = {"CreateSimpleChatBot", "CreateSimpleOllamaChatBot"};
end

properties
TestDir;
end

methods (TestClassSetup)
function setUpAndTearDowns(testCase)
% Capture and replay server interactions
testCase.TestDir = fileparts(mfilename("fullpath"));
import matlab.unittest.fixtures.PathFixture
capture = false; % run in capture or replay mode, cf. recordings/README.md

if capture
testCase.applyFixture(PathFixture( ...
fullfile(testCase.TestDir,"private","recording-doubles")));
else
testCase.applyFixture(PathFixture( ...
fullfile(testCase.TestDir,"private","replaying-doubles")));
end

import matlab.unittest.fixtures.CurrentFolderFixture
testCase.applyFixture(CurrentFolderFixture("../examples/mlx-scripts"));

Expand All @@ -29,22 +45,39 @@ function setUpAndTearDowns(testCase)
testCase.addTeardown(@() iCloseAll());
end
end


methods
function startCapture(testCase,testName)
llms.internal.sendRequestWrapper("open", ...
fullfile(testCase.TestDir,"recordings",testName));
end
end

methods(TestMethodTeardown)
function closeCapture(~)
llms.internal.sendRequestWrapper("close");
end
end

methods(Test)
function testAnalyzeScientificPapersUsingFunctionCalls(~)
function testAnalyzeScientificPapersUsingFunctionCalls(testCase)
testCase.startCapture("AnalyzeScientificPapersUsingFunctionCalls");
AnalyzeScientificPapersUsingFunctionCalls;
end

function testAnalyzeSentimentinTextUsingChatGPTinJSONMode(testCase)
testCase.startCapture("AnalyzeSentimentinTextUsingChatGPTinJSONMode");
testCase.verifyWarning(@AnalyzeSentimentinTextUsingChatGPTinJSONMode,...
"llms:warningJsonInstruction");
end

function testAnalyzeTextDataUsingParallelFunctionCallwithChatGPT(~)
function testAnalyzeTextDataUsingParallelFunctionCallwithChatGPT(testCase)
testCase.startCapture("AnalyzeTextDataUsingParallelFunctionCallwithChatGPT");
AnalyzeTextDataUsingParallelFunctionCallwithChatGPT;
end

function testCreateSimpleChatBot(testCase,ChatBotExample)
testCase.startCapture(ChatBotExample);
% set up a fake input command, returning canned user prompts
count = 0;
prompts = [
Expand Down Expand Up @@ -85,43 +118,51 @@ function testCreateSimpleChatBot(testCase,ChatBotExample)
testCase.verifySize(messages.Messages,[1 2*(count-1)]);
end

function testDescribeImagesUsingChatGPT(~)
function testDescribeImagesUsingChatGPT(testCase)
testCase.startCapture("DescribeImagesUsingChatGPT");
DescribeImagesUsingChatGPT;
end

function testInformationRetrievalUsingOpenAIDocumentEmbedding(~)
function testInformationRetrievalUsingOpenAIDocumentEmbedding(testCase)
testCase.startCapture("InformationRetrievalUsingOpenAIDocumentEmbedding");
InformationRetrievalUsingOpenAIDocumentEmbedding;
end

function testProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode(~)
function testProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode(testCase)
testCase.startCapture("ProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode");
ProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode;
end

function testProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode(~)
function testProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode(testCase)
testCase.startCapture("ProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode");
ProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode;
end

function testRetrievalAugmentedGenerationUsingChatGPTandMATLAB(~)
function testRetrievalAugmentedGenerationUsingChatGPTandMATLAB(testCase)
testCase.startCapture("RetrievalAugmentedGenerationUsingChatGPTandMATLAB");
RetrievalAugmentedGenerationUsingChatGPTandMATLAB;
end

function testRetrievalAugmentedGenerationUsingOllamaAndMATLAB(~)
function testRetrievalAugmentedGenerationUsingOllamaAndMATLAB(testCase)
testCase.startCapture("RetrievalAugmentedGenerationUsingOllamaAndMATLAB");
RetrievalAugmentedGenerationUsingOllamaAndMATLAB;
end

function testSummarizeLargeDocumentsUsingChatGPTandMATLAB(~)
function testSummarizeLargeDocumentsUsingChatGPTandMATLAB(testCase)
testCase.startCapture("SummarizeLargeDocumentsUsingChatGPTandMATLAB");
SummarizeLargeDocumentsUsingChatGPTandMATLAB;
end

function testUsingDALLEToEditImages(~)
function testUsingDALLEToEditImages(testCase)
testCase.startCapture("UsingDALLEToEditImages");
UsingDALLEToEditImages;
end

function testUsingDALLEToGenerateImages(~)
function testUsingDALLEToGenerateImages(testCase)
testCase.startCapture("UsingDALLEToGenerateImages");
UsingDALLEToGenerateImages;
end
end

end
end

function iCloseAll()
Expand Down
2 changes: 1 addition & 1 deletion tests/tollamaChat.m
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ function seedFixesResult(testCase)
end

function generateWithImages(testCase)
chat = ollamaChat("bakllava");
chat = ollamaChat("moondream");
image_path = "peppers.png";
emptyMessages = messageHistory;
messages = addUserMessageWithImages(emptyMessages,"What is in the image?",image_path);
Expand Down
Loading

0 comments on commit c357e75

Please sign in to comment.