diff --git a/+llms/+internal/callAzureChatAPI.m b/+llms/+internal/callAzureChatAPI.m
index bb73053..c053b4a 100644
--- a/+llms/+internal/callAzureChatAPI.m
+++ b/+llms/+internal/callAzureChatAPI.m
@@ -64,7 +64,7 @@
 
 parameters = buildParametersCall(messages, functions, nvp);
 
-[response, streamedText] = llms.internal.sendRequest(parameters,nvp.APIKey, URL, nvp.TimeOut, nvp.StreamFun);
+[response, streamedText] = llms.internal.sendRequestWrapper(parameters,nvp.APIKey, URL, nvp.TimeOut, nvp.StreamFun);
 
 % If call errors, "choices" will not be part of response.Body.Data, instead
 % we get response.Body.Data.error
diff --git a/+llms/+internal/callOllamaChatAPI.m b/+llms/+internal/callOllamaChatAPI.m
index 4596231..0bad15f 100644
--- a/+llms/+internal/callOllamaChatAPI.m
+++ b/+llms/+internal/callOllamaChatAPI.m
@@ -53,7 +53,7 @@
 
 parameters = buildParametersCall(model, messages, nvp);
 
-[response, streamedText] = llms.internal.sendRequest(parameters,[],URL,nvp.TimeOut,nvp.StreamFun);
+[response, streamedText] = llms.internal.sendRequestWrapper(parameters,[],URL,nvp.TimeOut,nvp.StreamFun);
 
 % If call errors, "choices" will not be part of response.Body.Data, instead
 % we get response.Body.Data.error
diff --git a/+llms/+internal/callOpenAIChatAPI.m b/+llms/+internal/callOpenAIChatAPI.m
index 8d58fd4..742ce50 100644
--- a/+llms/+internal/callOpenAIChatAPI.m
+++ b/+llms/+internal/callOpenAIChatAPI.m
@@ -62,7 +62,7 @@
 
 parameters = buildParametersCall(messages, functions, nvp);
 
-[response, streamedText] = llms.internal.sendRequest(parameters,nvp.APIKey, END_POINT, nvp.TimeOut, nvp.StreamFun);
+[response, streamedText] = llms.internal.sendRequestWrapper(parameters,nvp.APIKey, END_POINT, nvp.TimeOut, nvp.StreamFun);
 
 % If call errors, "choices" will not be part of response.Body.Data, instead
 % we get response.Body.Data.error
diff --git a/+llms/+internal/sendRequestWrapper.m b/+llms/+internal/sendRequestWrapper.m
new file mode 100644
index 0000000..18160ce
--- /dev/null
+++ b/+llms/+internal/sendRequestWrapper.m
@@ -0,0 +1,5 @@
+function [response, streamedText] = sendRequestWrapper(varargin)
+% This function is undocumented and will change in a future release
+
+% A wrapper around sendRequest to have a test seam
+[response, streamedText] = llms.internal.sendRequest(varargin{:});
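
All production call sites now route through `llms.internal.sendRequestWrapper` instead of calling `llms.internal.sendRequest` directly. Because MATLAB resolves same-named package functions by path order, a test can swap in a double simply by putting a folder containing its own `+llms/+internal/sendRequestWrapper.m` ahead on the path. A minimal sketch of that mechanism, run from the repository root (the prompt and key are illustrative):

    % addpath prepends by default, so the double's +llms/+internal package
    % member shadows the production wrapper until the folder is removed.
    addpath("tests/private/replaying-doubles");
    chat = openAIChat(APIKey="this-is-not-a-real-key");
    % generate -> callOpenAIChatAPI -> sendRequestWrapper (now the double)
    txt = generate(chat,"Hello");
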
diff --git a/+llms/+internal/textGenerator.m b/+llms/+internal/textGenerator.m
index f6cb167..204e516 100644
--- a/+llms/+internal/textGenerator.m
+++ b/+llms/+internal/textGenerator.m
@@ -28,4 +28,10 @@
     properties (Access=protected)
         StreamFun
     end
+
+    methods
+        function hObj = set.StopSequences(hObj,value)
+            hObj.StopSequences = string(value);
+        end
+    end
 end
diff --git a/+llms/+stream/responseStreamer.m b/+llms/+stream/responseStreamer.m
index b13048d..d3b60b1 100644
--- a/+llms/+stream/responseStreamer.m
+++ b/+llms/+stream/responseStreamer.m
@@ -84,7 +84,8 @@
                     end
                     this.StreamFun('');
                     this.ResponseText = txt;
-                else
+                elseif isfield(json.choices,"delta") && ...
+                        isfield(json.choices.delta,"content")
                     txt = json.choices.delta.content;
                     this.StreamFun(txt);
                     this.ResponseText = [this.ResponseText txt];
diff --git a/+llms/+utils/mustBeValidStop.m b/+llms/+utils/mustBeValidStop.m
index f3862c7..7301a2c 100644
--- a/+llms/+utils/mustBeValidStop.m
+++ b/+llms/+utils/mustBeValidStop.m
@@ -5,6 +5,7 @@ function mustBeValidStop(value)
     if ~isempty(value)
         mustBeVector(value);
         mustBeNonzeroLengthText(value);
+        value = string(value);
         % This restriction is set by the OpenAI API
         if numel(value)>4
             error("llms:stopSequencesMustHaveMax4Elements", llms.utils.errorMessageCatalog.getMessage("llms:stopSequencesMustHaveMax4Elements"));
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..cf5aa55
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,4 @@
+# Code owners, to get auto-filled reviewer lists
+
+# To start with, we just assume everyone in the core team is included on all reviews
+* @adulai @ccreutzi @debymf @MiriamScharnke @vpapanasta
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d7babcc..6be9f48 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -30,7 +30,7 @@ jobs:
       - name: Pull models
         run: |
           ollama pull mistral
-          ollama pull bakllava
+          ollama pull moondream
           OLLAMA_HOST=127.0.0.1:11435 ollama pull qwen2:0.5b
       - name: Set up MATLAB
         uses: matlab-actions/setup-matlab@v2
diff --git a/.gitignore b/.gitignore
index 4886537..510c487 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 *.env
 *.asv
 *.mat
+!tests/recordings/*.mat
 startup.m
 papers_to_read.csv
 data/*
diff --git a/extractOpenAIEmbeddings.m b/extractOpenAIEmbeddings.m
index 6813e0a..3f6b1a9 100644
--- a/extractOpenAIEmbeddings.m
+++ b/extractOpenAIEmbeddings.m
@@ -47,7 +47,7 @@
 end
 
-response = llms.internal.sendRequest(parameters,key, END_POINT, nvp.TimeOut);
+response = llms.internal.sendRequestWrapper(parameters,key, END_POINT, nvp.TimeOut);
 
 if isfield(response.Body.Data, "data")
     emb = [response.Body.Data.data.embedding];
diff --git a/tests/private/recording-doubles/+llms/+internal/sendRequestWrapper.m b/tests/private/recording-doubles/+llms/+internal/sendRequestWrapper.m
new file mode 100644
index 0000000..ab4858c
--- /dev/null
+++ b/tests/private/recording-doubles/+llms/+internal/sendRequestWrapper.m
@@ -0,0 +1,40 @@
+function [response, streamedText] = sendRequestWrapper(parameters, token, varargin)
+% This function is undocumented and will change in a future release
+
+% A wrapper around sendRequest to have a test seam
+persistent seenCalls
+if isempty(seenCalls)
+    seenCalls = cell(0,2);
+end
+
+persistent filename
+
+if nargin == 1 && isequal(parameters,"close")
+    save(filename+".mat","seenCalls");
+    seenCalls = cell(0,2);
+    return
+end
+
+if nargin==2 && isequal(parameters,"open")
+    filename = token;
+    return
+end
+
+streamFunCalls = {};
+hasCallback = nargin >= 5 && isa(varargin{3},'function_handle');
+if hasCallback
+    streamFun = varargin{3};
+end
+function wrappedStreamFun(varargin)
+    streamFunCalls(end+1) = varargin;
+    streamFun(varargin{:});
+end
+if hasCallback
+    varargin{3} = @wrappedStreamFun;
+end
+
+
+[response, streamedText] = llms.internal.sendRequest(parameters, token, varargin{:});
+
+seenCalls(end+1,:) = {{parameters},{response,streamFunCalls,streamedText}};
+end
diff --git a/tests/private/recording-doubles/addpath.m b/tests/private/recording-doubles/addpath.m
new file mode 100644
index 0000000..d1f1059
--- /dev/null
+++ b/tests/private/recording-doubles/addpath.m
@@ -0,0 +1,2 @@
+function addpath(~)
+% ignore addpath calls in examples
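
The recording double keeps a queue of calls in the `persistent` variable `seenCalls` and is driven by a small string protocol: `"open"` names the target MAT-file, ordinary calls pass through to the live `llms.internal.sendRequest` while appending the parameters and the `{response, streamFunCalls, streamedText}` triple to the queue, and `"close"` saves the queue and resets it. Roughly, a capture session looks like this (the example name is hypothetical):

    llms.internal.sendRequestWrapper("open","tests/recordings/MyExample");
    MyExample;   % runs against the live server; every request/response pair is queued
    llms.internal.sendRequestWrapper("close");   % writes tests/recordings/MyExample.mat
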
diff --git a/tests/private/replaying-doubles/+llms/+internal/sendRequestWrapper.m b/tests/private/replaying-doubles/+llms/+internal/sendRequestWrapper.m
new file mode 100644
index 0000000..0b689d7
--- /dev/null
+++ b/tests/private/replaying-doubles/+llms/+internal/sendRequestWrapper.m
@@ -0,0 +1,30 @@
+function [response, streamedText] = sendRequestWrapper(parameters, token, varargin)
+% This function is undocumented and will change in a future release
+
+% A wrapper around sendRequest to have a test seam
+persistent seenCalls
+if isempty(seenCalls)
+    seenCalls = cell(0,2);
+end
+
+if nargin == 1 && isequal(parameters,"close")
+    seenCalls = cell(0,2);
+    return
+end
+
+if nargin==2 && isequal(parameters,"open")
+    load(token+".mat","seenCalls");
+    return
+end
+
+result = seenCalls{1,2};
+response = result{1};
+streamFunCalls = result{2};
+streamedText = result{3};
+
+if nargin >= 5 && isa(varargin{3},'function_handle')
+    streamFun = varargin{3};
+    cellfun(streamFun, streamFunCalls);
+end
+
+seenCalls(1,:) = [];
diff --git a/tests/private/replaying-doubles/addpath.m b/tests/private/replaying-doubles/addpath.m
new file mode 100644
index 0000000..d1f1059
--- /dev/null
+++ b/tests/private/replaying-doubles/addpath.m
@@ -0,0 +1,2 @@
+function addpath(~)
+% ignore addpath calls in examples
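
The replaying double speaks the same protocol but never touches the network: `"open"` loads `seenCalls` from the MAT-file, and each ordinary call pops the front entry `seenCalls{1,2}`, replaying any recorded streaming chunks through the caller's callback via `cellfun`. Replay is strictly first-in-first-out, so an example must issue its requests in the same order as when it was captured. A hypothetical replay session:

    llms.internal.sendRequestWrapper("open","tests/recordings/MyExample");  % loads MyExample.mat
    MyExample;   % each request is answered from the front of the recorded queue
    llms.internal.sendRequestWrapper("close");   % discards any leftover entries
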
diff --git a/tests/recordings/AnalyzeScientificPapersUsingFunctionCalls.mat b/tests/recordings/AnalyzeScientificPapersUsingFunctionCalls.mat
new file mode 100644
index 0000000..db2a334
Binary files /dev/null and b/tests/recordings/AnalyzeScientificPapersUsingFunctionCalls.mat differ
diff --git a/tests/recordings/AnalyzeSentimentinTextUsingChatGPTinJSONMode.mat b/tests/recordings/AnalyzeSentimentinTextUsingChatGPTinJSONMode.mat
new file mode 100644
index 0000000..310d0e8
Binary files /dev/null and b/tests/recordings/AnalyzeSentimentinTextUsingChatGPTinJSONMode.mat differ
diff --git a/tests/recordings/AnalyzeTextDataUsingParallelFunctionCallwithChatGPT.mat b/tests/recordings/AnalyzeTextDataUsingParallelFunctionCallwithChatGPT.mat
new file mode 100644
index 0000000..c51cde7
Binary files /dev/null and b/tests/recordings/AnalyzeTextDataUsingParallelFunctionCallwithChatGPT.mat differ
diff --git a/tests/recordings/CreateSimpleChatBot.mat b/tests/recordings/CreateSimpleChatBot.mat
new file mode 100644
index 0000000..857ff8b
Binary files /dev/null and b/tests/recordings/CreateSimpleChatBot.mat differ
diff --git a/tests/recordings/CreateSimpleOllamaChatBot.mat b/tests/recordings/CreateSimpleOllamaChatBot.mat
new file mode 100644
index 0000000..eadaa28
Binary files /dev/null and b/tests/recordings/CreateSimpleOllamaChatBot.mat differ
diff --git a/tests/recordings/DescribeImagesUsingChatGPT.mat b/tests/recordings/DescribeImagesUsingChatGPT.mat
new file mode 100644
index 0000000..227da1b
Binary files /dev/null and b/tests/recordings/DescribeImagesUsingChatGPT.mat differ
diff --git a/tests/recordings/InformationRetrievalUsingOpenAIDocumentEmbedding.mat b/tests/recordings/InformationRetrievalUsingOpenAIDocumentEmbedding.mat
new file mode 100644
index 0000000..d82f627
Binary files /dev/null and b/tests/recordings/InformationRetrievalUsingOpenAIDocumentEmbedding.mat differ
diff --git a/tests/recordings/ProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode.mat b/tests/recordings/ProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode.mat
new file mode 100644
index 0000000..5bdf6d7
Binary files /dev/null and b/tests/recordings/ProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode.mat differ
diff --git a/tests/recordings/ProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode.mat b/tests/recordings/ProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode.mat
new file mode 100644
index 0000000..cf73f21
Binary files /dev/null and b/tests/recordings/ProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode.mat differ
diff --git a/tests/recordings/README.md b/tests/recordings/README.md
new file mode 100644
index 0000000..010208d
--- /dev/null
+++ b/tests/recordings/README.md
@@ -0,0 +1,12 @@
+# Test Double Recordings
+
+Testing the examples typically takes a long time and tends to have false negatives relatively often, mostly due to timeout errors.
+
+The point of testing the examples is not to test that we can connect to the servers. We have other test points for that. Hence, we insert a “test double” while testing the examples that keeps recordings of previous interactions with the servers and just replays the responses.
+
+This directory contains those recordings.
+
+## Generating Recordings
+
+To generate or re-generate recordings (e.g., after changing an example, or making relevant software changes), open [`texampleTests.m`](../texampleTests.m) and in `setUpAndTearDowns`, change `capture = false;` to `capture = true;`. Then, run the test points relevant to the example(s) in question, and change `capture` back to `false`.
+
diff --git a/tests/recordings/RetrievalAugmentedGenerationUsingChatGPTandMATLAB.mat b/tests/recordings/RetrievalAugmentedGenerationUsingChatGPTandMATLAB.mat
new file mode 100644
index 0000000..6bd5878
Binary files /dev/null and b/tests/recordings/RetrievalAugmentedGenerationUsingChatGPTandMATLAB.mat differ
diff --git a/tests/recordings/RetrievalAugmentedGenerationUsingOllamaAndMATLAB.mat b/tests/recordings/RetrievalAugmentedGenerationUsingOllamaAndMATLAB.mat
new file mode 100644
index 0000000..8329b23
Binary files /dev/null and b/tests/recordings/RetrievalAugmentedGenerationUsingOllamaAndMATLAB.mat differ
diff --git a/tests/recordings/SummarizeLargeDocumentsUsingChatGPTandMATLAB.mat b/tests/recordings/SummarizeLargeDocumentsUsingChatGPTandMATLAB.mat
new file mode 100644
index 0000000..f44077b
Binary files /dev/null and b/tests/recordings/SummarizeLargeDocumentsUsingChatGPTandMATLAB.mat differ
diff --git a/tests/recordings/UsingDALLEToEditImages.mat b/tests/recordings/UsingDALLEToEditImages.mat
new file mode 100644
index 0000000..f20b183
Binary files /dev/null and b/tests/recordings/UsingDALLEToEditImages.mat differ
diff --git a/tests/recordings/UsingDALLEToGenerateImages.mat b/tests/recordings/UsingDALLEToGenerateImages.mat
new file mode 100644
index 0000000..58b842b
Binary files /dev/null and b/tests/recordings/UsingDALLEToGenerateImages.mat differ
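
Following the README above, re-recording after a change to an example comes down to flipping one flag and running the affected test point. A hypothetical session for one example (the selector value is illustrative; `ProcedureName` is a standard `runtests` filter):

    % 1. In tests/texampleTests.m, setUpAndTearDowns: change capture = false; to capture = true;
    % 2. Run only the affected test point:
    runtests("tests/texampleTests.m","ProcedureName","testDescribeImagesUsingChatGPT")
    % 3. Change capture back to false and commit the refreshed .mat file in tests/recordings/.
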
diff --git a/tests/texampleTests.m b/tests/texampleTests.m
index ea5955b..e08a5c9 100644
--- a/tests/texampleTests.m
+++ b/tests/texampleTests.m
@@ -8,9 +8,25 @@
         ChatBotExample = {"CreateSimpleChatBot", "CreateSimpleOllamaChatBot"};
     end
 
+    properties
+        TestDir;
+    end
 
     methods (TestClassSetup)
         function setUpAndTearDowns(testCase)
+            % Capture and replay server interactions
+            testCase.TestDir = fileparts(mfilename("fullpath"));
+            import matlab.unittest.fixtures.PathFixture
+            capture = false; % run in capture or replay mode, cf. recordings/README.md
+
+            if capture
+                testCase.applyFixture(PathFixture( ...
+                    fullfile(testCase.TestDir,"private","recording-doubles")));
+            else
+                testCase.applyFixture(PathFixture( ...
+                    fullfile(testCase.TestDir,"private","replaying-doubles")));
+            end
+
             import matlab.unittest.fixtures.CurrentFolderFixture
             testCase.applyFixture(CurrentFolderFixture("../examples/mlx-scripts"));
@@ -29,22 +45,39 @@ function setUpAndTearDowns(testCase)
             testCase.addTeardown(@() iCloseAll());
         end
     end
-    
+
+    methods
+        function startCapture(testCase,testName)
+            llms.internal.sendRequestWrapper("open", ...
+                fullfile(testCase.TestDir,"recordings",testName));
+        end
+    end
+
+    methods(TestMethodTeardown)
+        function closeCapture(~)
+            llms.internal.sendRequestWrapper("close");
+        end
+    end
+
     methods(Test)
-        function testAnalyzeScientificPapersUsingFunctionCalls(~)
+        function testAnalyzeScientificPapersUsingFunctionCalls(testCase)
+            testCase.startCapture("AnalyzeScientificPapersUsingFunctionCalls");
             AnalyzeScientificPapersUsingFunctionCalls;
         end
 
         function testAnalyzeSentimentinTextUsingChatGPTinJSONMode(testCase)
+            testCase.startCapture("AnalyzeSentimentinTextUsingChatGPTinJSONMode");
             testCase.verifyWarning(@AnalyzeSentimentinTextUsingChatGPTinJSONMode,...
                 "llms:warningJsonInstruction");
         end
 
-        function testAnalyzeTextDataUsingParallelFunctionCallwithChatGPT(~)
+        function testAnalyzeTextDataUsingParallelFunctionCallwithChatGPT(testCase)
+            testCase.startCapture("AnalyzeTextDataUsingParallelFunctionCallwithChatGPT");
             AnalyzeTextDataUsingParallelFunctionCallwithChatGPT;
         end
 
         function testCreateSimpleChatBot(testCase,ChatBotExample)
+            testCase.startCapture(ChatBotExample);
             % set up a fake input command, returning canned user prompts
             count = 0;
             prompts = [
@@ -85,43 +118,51 @@ function testCreateSimpleChatBot(testCase,ChatBotExample)
             testCase.verifySize(messages.Messages,[1 2*(count-1)]);
         end
 
-        function testDescribeImagesUsingChatGPT(~)
+        function testDescribeImagesUsingChatGPT(testCase)
+            testCase.startCapture("DescribeImagesUsingChatGPT");
             DescribeImagesUsingChatGPT;
         end
 
-        function testInformationRetrievalUsingOpenAIDocumentEmbedding(~)
+        function testInformationRetrievalUsingOpenAIDocumentEmbedding(testCase)
+            testCase.startCapture("InformationRetrievalUsingOpenAIDocumentEmbedding");
             InformationRetrievalUsingOpenAIDocumentEmbedding;
         end
 
-        function testProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode(~)
+        function testProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode(testCase)
+            testCase.startCapture("ProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode");
             ProcessGeneratedTextinRealTimebyUsingChatGPTinStreamingMode;
         end
 
-        function testProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode(~)
+        function testProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode(testCase)
+            testCase.startCapture("ProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode");
             ProcessGeneratedTextInRealTimeByUsingOllamaInStreamingMode;
         end
 
-        function testRetrievalAugmentedGenerationUsingChatGPTandMATLAB(~)
+        function testRetrievalAugmentedGenerationUsingChatGPTandMATLAB(testCase)
+            testCase.startCapture("RetrievalAugmentedGenerationUsingChatGPTandMATLAB");
             RetrievalAugmentedGenerationUsingChatGPTandMATLAB;
         end
 
-        function testRetrievalAugmentedGenerationUsingOllamaAndMATLAB(~)
+        function testRetrievalAugmentedGenerationUsingOllamaAndMATLAB(testCase)
+            testCase.startCapture("RetrievalAugmentedGenerationUsingOllamaAndMATLAB");
             RetrievalAugmentedGenerationUsingOllamaAndMATLAB;
         end
 
-        function testSummarizeLargeDocumentsUsingChatGPTandMATLAB(~)
+        function testSummarizeLargeDocumentsUsingChatGPTandMATLAB(testCase)
+            testCase.startCapture("SummarizeLargeDocumentsUsingChatGPTandMATLAB");
             SummarizeLargeDocumentsUsingChatGPTandMATLAB;
         end
 
-        function testUsingDALLEToEditImages(~)
+        function testUsingDALLEToEditImages(testCase)
+            testCase.startCapture("UsingDALLEToEditImages");
             UsingDALLEToEditImages;
         end
 
-        function testUsingDALLEToGenerateImages(~)
+        function testUsingDALLEToGenerateImages(testCase)
+            testCase.startCapture("UsingDALLEToGenerateImages");
             UsingDALLEToGenerateImages;
         end
-    end
-    
+    end
 end
 
 function iCloseAll()
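
Every test point now follows the same shape: `startCapture` opens the recording named after the example before the example script runs, and `closeCapture` in the `TestMethodTeardown` block issues the matching `"close"` even when the test fails. A new example test would presumably look like this (the example name is hypothetical):

    function testMyNewExample(testCase)
        testCase.startCapture("MyNewExample");  % opens tests/recordings/MyNewExample(.mat)
        MyNewExample;                           % run the exported mlx-script
    end
    % closeCapture in TestMethodTeardown sends the matching "close" automatically.
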
diff --git a/tests/tollamaChat.m b/tests/tollamaChat.m
index 1040c4a..52d1376 100644
--- a/tests/tollamaChat.m
+++ b/tests/tollamaChat.m
@@ -99,7 +99,7 @@ function seedFixesResult(testCase)
         end
 
         function generateWithImages(testCase)
-            chat = ollamaChat("bakllava");
+            chat = ollamaChat("moondream");
             image_path = "peppers.png";
             emptyMessages = messageHistory;
             messages = addUserMessageWithImages(emptyMessages,"What is in the image?",image_path);
diff --git a/tests/topenAIChat.m b/tests/topenAIChat.m
index e06db55..ad3f69e 100644
--- a/tests/topenAIChat.m
+++ b/tests/topenAIChat.m
@@ -97,6 +97,14 @@ function invalidInputsConstructor(testCase, InvalidConstructorInput)
             testCase.verifyError(@()openAIChat(InvalidConstructorInput.Input{:}), InvalidConstructorInput.Error);
         end
 
+        function generateWithStreamFunAndMaxNumTokens(testCase)
+            sf = @(x) fprintf("%s",x);
+            chat = openAIChat(StreamFun=sf);
+            result = generate(chat,"Why is a raven like a writing desk?",MaxNumTokens=5);
+            testCase.verifyClass(result,"string");
+            testCase.verifyLessThan(strlength(result), 100);
+        end
+
         function generateWithToolsAndStreamFunc(testCase)
             import matlab.unittest.constraints.HasField
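
This new test point covers the combination of a streaming callback with a token cap. The 100-character bound is deliberately loose, since tokens do not map one-to-one to characters. Outside the test, the same combination would be used roughly like this (prompt illustrative; the API key is assumed to come from the environment, as in the test):

    sf = @(x) fprintf("%s",x);          % print each streamed chunk as it arrives
    chat = openAIChat(StreamFun=sf);
    txt = generate(chat,"Tell me a joke",MaxNumTokens=5);  % generation stops after ~5 tokens
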
+ "FunctionNames", {[]}, ... + "ModelName", {"gpt-4o-mini"}, ... + "SystemPrompt", {[]}, ... + "ResponseFormat", {"text"} ... + ) ... + ), ... "PresencePenalty", struct( ... "Input",{{"APIKey","this-is-not-a-real-key","PresencePenalty",0.1}}, ... "ExpectedWarning", '', ... "VerifyProperties", struct( ... "Temperature", {1}, ... "TopP", {1}, ... - "StopSequences", {{}}, ... + "StopSequences", {string([])}, ... "PresencePenalty", {0.1}, ... "FrequencyPenalty", {0}, ... "TimeOut", {10}, ... @@ -492,7 +515,7 @@ function keyNotFound(testCase) "VerifyProperties", struct( ... "Temperature", {1}, ... "TopP", {1}, ... - "StopSequences", {{}}, ... + "StopSequences", {string([])}, ... "PresencePenalty", {0}, ... "FrequencyPenalty", {0.1}, ... "TimeOut", {10}, ... @@ -508,7 +531,7 @@ function keyNotFound(testCase) "VerifyProperties", struct( ... "Temperature", {1}, ... "TopP", {1}, ... - "StopSequences", {{}}, ... + "StopSequences", {string([])}, ... "PresencePenalty", {0}, ... "FrequencyPenalty", {0}, ... "TimeOut", {0.1}, ... @@ -524,7 +547,7 @@ function keyNotFound(testCase) "VerifyProperties", struct( ... "Temperature", {1}, ... "TopP", {1}, ... - "StopSequences", {{}}, ... + "StopSequences", {string([])}, ... "PresencePenalty", {0}, ... "FrequencyPenalty", {0}, ... "TimeOut", {10}, ...