From b3bf0c6c9be12db9928fcc187f52fbf6e1f467dd Mon Sep 17 00:00:00 2001 From: Sergio Bayona Date: Mon, 20 May 2024 15:56:32 -0500 Subject: [PATCH] Allow openai client passthrough (default) behaviour When the `response_model` parameter in the `chat` method is not defined, it should let the openai client operate as originally intended. --- lib/instructor/openai/patch.rb | 4 +- spec/openai/patch_spec.rb | 14 +++ .../patching_spec/standard_usage.yml | 97 +++++++++++++++++++ 3 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 spec/vcr_cassettes/patching_spec/standard_usage.yml diff --git a/lib/instructor/openai/patch.rb b/lib/instructor/openai/patch.rb index 9456a24..3288fd3 100644 --- a/lib/instructor/openai/patch.rb +++ b/lib/instructor/openai/patch.rb @@ -29,6 +29,8 @@ def with_retries(max_retries, exceptions, &block) # @param validation_context [Hash] The validation context for the parameters. Optional. # @return [Object] The processed response. def chat(parameters:, response_model: nil, max_retries: 0, validation_context: nil) + return json_post(path: '/chat/completions', parameters:) if response_model.nil? + with_retries(max_retries, [JSON::ParserError, Instructor::ValidationError, Faraday::ParsingError]) do model = determine_model(response_model) function = build_function(model) @@ -103,7 +105,7 @@ def process_single_response(parsed_response, model) # Determines the response model based on the provided value. # # @param response_model [Class] The response model class or typed array. - # @return [Class] The determined response model class. + # @return [Class] The response model. 
def determine_model(response_model) if response_model.is_a?(T::Types::TypedArray) @iterable = true diff --git a/spec/openai/patch_spec.rb b/spec/openai/patch_spec.rb index e27f30d..3752746 100644 --- a/spec/openai/patch_spec.rb +++ b/spec/openai/patch_spec.rb @@ -188,4 +188,18 @@ def self.name end.to raise_error(Instructor::ValidationError) end end + + describe 'when the client is used in a standard manner' do + it 'does not raise an error when the client is used in a standard manner', vcr: 'patching_spec/standard_usage' do + response = patched_client.new.chat( + parameters: { + model: 'gpt-3.5-turbo', + messages: [{ role: 'user', content: 'How is the weather today in New York?' }] + } + ) + + expect(response).to be_a(Hash) + expect(response.dig('choices', 0, 'message', 'content')).to be_a(String) + end + end end diff --git a/spec/vcr_cassettes/patching_spec/standard_usage.yml b/spec/vcr_cassettes/patching_spec/standard_usage.yml new file mode 100644 index 0000000..3c91ab1 --- /dev/null +++ b/spec/vcr_cassettes/patching_spec/standard_usage.yml @@ -0,0 +1,97 @@ +--- +http_interactions: +- request: + method: post + uri: https://api.openai.com/v1/chat/completions + body: + encoding: UTF-8 + string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"How + is the weather today in New York?"}]}' + headers: + Content-Type: + - application/json + Authorization: + - Bearer + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - "*/*" + User-Agent: + - Ruby + response: + status: + code: 200 + message: OK + headers: + Date: + - Mon, 20 May 2024 20:18:44 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Openai-Organization: + - user-jtftkqrbreteg5pmdrfzchv6 + Openai-Processing-Ms: + - '1141' + Openai-Version: + - '2020-10-01' + Strict-Transport-Security: + - max-age=15724800; includeSubDomains + X-Ratelimit-Limit-Requests: + - '10000' + X-Ratelimit-Limit-Tokens: + - '60000' + 
X-Ratelimit-Remaining-Requests: + - '9999' + X-Ratelimit-Remaining-Tokens: + - '59973' + X-Ratelimit-Reset-Requests: + - 8.64s + X-Ratelimit-Reset-Tokens: + - 27ms + X-Request-Id: + - req_9cd156b89ffbc49d042594df684cd71c + Cf-Cache-Status: + - DYNAMIC + Set-Cookie: + - __cf_bm=iw7.dE814kHN0QINKJy3dRtjL44VSwwBZFyDHvN2pNo-1716236324-1.0.1.1-_sZi0S6Fm2fn7rSXA7NQ22IbWSrA0yBXtvFZ7BgT06q2J6upyZqiU7WDAqYfK3_DCCsi.xd2.aDu1erMGdb9dg; + path=/; expires=Mon, 20-May-24 20:48:44 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=l.4Wi13joGQwrPn0H.rLjBrxZJCHqY15_r5zTRua9H0-1716236324578-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Server: + - cloudflare + Cf-Ray: + - 886f047cea6374ac-MIA + Alt-Svc: + - h3=":443"; ma=86400 + body: + encoding: ASCII-8BIT + string: | + { + "id": "chatcmpl-9R3kx1j8twG05UJGM5ReVEpjUUHtL", + "object": "chat.completion", + "created": 1716236323, + "model": "gpt-3.5-turbo-0125", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "I'm sorry, I cannot provide real-time weather information. I recommend checking a reliable weather website or app for the most up-to-date weather conditions in New York." + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 16, + "completion_tokens": 33, + "total_tokens": 49 + }, + "system_fingerprint": null + } + recorded_at: Mon, 20 May 2024 20:18:44 GMT +recorded_with: VCR 6.2.0