8 changes: 8 additions & 0 deletions lib/langchain/llm/ollama.rb
@@ -287,8 +287,16 @@ def auth_headers
     end
 
     def json_responses_chunk_handler(&block)
+      incomplete_chunk_line = nil
       proc do |chunk, _size|
         chunk.split("\n").each do |chunk_line|
+          if incomplete_chunk_line
+            chunk_line = incomplete_chunk_line + chunk_line
+            incomplete_chunk_line = nil
+          end
+
+          next incomplete_chunk_line = chunk_line unless chunk_line.end_with?("}")
+
           parsed_chunk = JSON.parse(chunk_line)
           block.call(parsed_chunk)
         end
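For context, here is a standalone sketch of the buffering technique this hunk introduces. The handler body mirrors the diff; the driver calls and sample chunks at the bottom are hypothetical:

require "json"

# A streamed body can cut a JSON line anywhere, so any line that does not
# yet end in "}" is held back and prepended to the next chunk before parsing.
def json_responses_chunk_handler(&block)
  incomplete_chunk_line = nil
  proc do |chunk, _size|
    chunk.split("\n").each do |chunk_line|
      # Re-attach the tail carried over from the previous chunk, if any.
      if incomplete_chunk_line
        chunk_line = incomplete_chunk_line + chunk_line
        incomplete_chunk_line = nil
      end

      # Stash a partial line and wait for the rest to arrive.
      next incomplete_chunk_line = chunk_line unless chunk_line.end_with?("}")

      block.call(JSON.parse(chunk_line))
    end
  end
end

# Hypothetical usage: the second JSON object arrives split across two chunks.
handler = json_responses_chunk_handler { |json| puts json["response"] }
handler.call(%({"response":"Comp"}\n{"response":"lica), 0)
handler.call(%(ted."}), 0)
# Prints "Comp", then "licated."

Note that the end_with?("}") check is a heuristic: it assumes each streamed line is a complete top-level JSON object once it closes with "}", which holds for Ollama's newline-delimited JSON responses.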

Some generated files are not rendered by default.
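(These unrendered files are presumably the VCR cassettes backing the new "handles multiline json" examples below, recorded with responses whose JSON objects are split across chunks.)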
20 changes: 20 additions & 0 deletions spec/lib/langchain/llm/ollama_spec.rb
@@ -87,6 +87,11 @@
       expect(response.completion).to eq("Complicated.")
     end
 
+    it "handles multiline json", :vcr do
+      expect { response }.not_to raise_error
+      expect(response.completion).to eq("Complicated.")
+    end
+
     it "does not use streamed responses", vcr: {cassette_name: "Langchain_LLM_Ollama_complete_returns_a_completion"} do
       expect(client).to receive(:post).with("api/generate", hash_including(stream: false)).and_call_original
       response
@@ -102,6 +107,11 @@
       expect(response.total_tokens).to eq(36)
     end
 
+    it "handles multiline json", :vcr do
+      expect { response }.not_to raise_error
+      expect(response.completion).to eq("Complicated.")
+    end
+
     it "uses streamed responses", vcr: {cassette_name: "Langchain_LLM_Ollama_complete_when_passing_a_block_returns_a_completion"} do
       expect(client).to receive(:post).with("api/generate", hash_including(stream: true)).and_call_original
       response
@@ -125,6 +135,11 @@
       expect(response.chat_completion).to include("I'm just a language model")
     end
 
+    it "handles multiline json", :vcr do
+      expect { response }.not_to raise_error
+      expect(response.chat_completion).to include("I'm just a language model")
+    end
+
     it "does not use streamed responses", vcr: {cassette_name: "Langchain_LLM_Ollama_chat_returns_a_chat_completion"} do
       expect(client).to receive(:post).with("api/chat", hash_including(stream: false)).and_call_original
       response
@@ -139,6 +154,11 @@
       expect(response.chat_completion).to include("I'm just a language model")
     end
 
+    it "handles multiline json", :vcr do
+      expect { response }.not_to raise_error
+      expect(response.chat_completion).to include("I'm just a language model")
+    end
+
     it "uses streamed responses", vcr: {cassette_name: "Langchain_LLM_Ollama_chat_when_passing_a_block_returns_a_chat_completion"} do
       expect(client).to receive(:post).with("api/chat", hash_including(stream: true)).and_call_original
       response
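The new examples exercise the handler indirectly through recorded cassettes. A more direct unit test is also possible; a minimal sketch, assuming the handler is private and reachable via send, and that the constructor accepts a url: keyword (both assumptions about the library's internals):

RSpec.describe Langchain::LLM::Ollama do
  # The URL is a placeholder; no request is made because the private
  # handler is invoked directly.
  let(:llm) { described_class.new(url: "http://localhost:11434") }

  it "reassembles a JSON object split across chunks" do
    parsed = []
    handler = llm.send(:json_responses_chunk_handler) { |json| parsed << json }

    # Simulate a streamed body cut mid-object.
    handler.call(%({"response":"Comp"}\n{"response":"lica), 0)
    handler.call(%(ted."}), 0)

    expect(parsed.map { |j| j["response"] }).to eq(["Comp", "licated."])
  end
end

A test like this pins down the buffering behavior itself, while the VCR-backed examples above confirm it end to end against recorded Ollama responses.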