Skip to content

Adds ability to capture HTTP errors #328

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 53 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,59 @@ OpenAI.configure do |config|
end
```

#### Handling Errors from the OpenAI API

By default, requests made with this library only return the `body` of the response (if one exists). You can change this behavior and have 4xx and 5xx HTTP status codes raise an `OpenAI::HTTP::Error` by using the following client configuration:

```ruby
client = OpenAI::Client.new(
access_token: "access_token_goes_here",
raise_error: true
)
```

or when configuring the gem:

```ruby
OpenAI.configure do |config|
config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
config.raise_error = true
end
```

With this configuration set, calls to any of the client methods (`chat`,
`completions`, `embeddings`, etc.) will all raise an error
(`OpenAI::HTTP::Error`) if the response's HTTP status code is 4xx or 5xx. This
special error gives you access to some key information about the response on the instance of the error raised:

```ruby
client = OpenAI::Client.new(
access_token: "access_token_goes_here",
raise_error: true
)
begin
response = client.chat(
parameters: {
model: "gpt-42", # providing an invalid model
messages: [{ role: "user", content: "Doh!"}]
})
rescue OpenAI::HTTP::Error => e
puts e.response[:status]
puts e.response[:headers]
puts e.response[:body]
end
# => 404
# {"date"=>"Wed, 06 Sep 2023 21:26:07 GMT", "content-type"=>"application/json; charset=utf-8", "transfer-encoding"=>"chunked", "connection"=>"keep-alive", "vary"=>"Origin", "x-request-id"=>"b834b1406029a174ac0ff2cc90cb6a87", "strict-transport-security"=>"max-age=15724800; includeSubDomains", "cf-cache-status"=>"DYNAMIC", "server"=>"cloudflare", "cf-ray"=>"8029c9d88ddf424d-EWR", "content-encoding"=>"gzip", "alt-svc"=>"h3=\":443\"; ma=86400"}
# {
# "error": {
# "message": "The model `gpt-42` does not exist",
# "type": "invalid_request_error",
# "param": null,
# "code": "model_not_found"
# }
# }
```

#### Azure

To use the [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) API, you can configure the gem like this:
Expand Down
5 changes: 3 additions & 2 deletions lib/openai.rb
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
require "faraday"
require "faraday/multipart"

require_relative "openai/error"
require_relative "openai/http"
require_relative "openai/client"
require_relative "openai/files"
Expand All @@ -11,13 +12,12 @@
require_relative "openai/version"

module OpenAI
class Error < StandardError; end
class ConfigurationError < Error; end

class Configuration
attr_writer :access_token
attr_accessor :api_type, :api_version, :organization_id, :uri_base, :request_timeout,
:extra_headers
:extra_headers, :raise_error

DEFAULT_API_VERSION = "v1".freeze
DEFAULT_URI_BASE = "https://api.openai.com/".freeze
Expand All @@ -31,6 +31,7 @@ def initialize
@uri_base = DEFAULT_URI_BASE
@request_timeout = DEFAULT_REQUEST_TIMEOUT
@extra_headers = nil
@raise_error = false
end

def access_token
Expand Down
1 change: 1 addition & 0 deletions lib/openai/client.rb
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ class Client
uri_base
request_timeout
extra_headers
raise_error
].freeze
attr_reader *CONFIG_KEYS

Expand Down
6 changes: 6 additions & 0 deletions lib/openai/error.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# frozen_string_literal: true

module OpenAI
  # Root of the gem's exception hierarchy. Rescue this class to catch any
  # error raised by the library, including OpenAI::HTTP::Error subclasses.
  class Error < StandardError; end
end
22 changes: 22 additions & 0 deletions lib/openai/http.rb
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
require_relative "http/error"

module OpenAI
module HTTP
def get(path:)
  # Perform the GET and parse the response body (which may be nil) as JSON.
  response = conn.get(uri(path: path)) { |req| req.headers = headers }
  to_json(response&.body)
rescue ::Faraday::Error => e
  # Timeouts/connection failures, plus 4xx/5xx responses when the
  # :raise_error middleware is enabled; translated per configuration.
  handle_response_error(e)
end

def json_post(path:, parameters:)
Expand All @@ -18,19 +22,25 @@ def json_post(path:, parameters:)
req.headers = headers
req.body = parameters.to_json
end&.body)
rescue ::Faraday::Error => e
handle_response_error(e)
end

def multipart_post(path:, parameters: nil)
  # File uploads need a multipart-capable connection plus the matching
  # Content-Type header; parameters are converted to multipart form parts.
  response = conn(multipart: true).post(uri(path: path)) do |req|
    req.headers = headers.merge({ "Content-Type" => "multipart/form-data" })
    req.body = multipart_parameters(parameters)
  end
  to_json(response&.body)
rescue ::Faraday::Error => e
  handle_response_error(e)
end

def delete(path:)
  # Perform the DELETE and parse the response body (which may be nil) as JSON.
  response = conn.delete(uri(path: path)) { |req| req.headers = headers }
  to_json(response&.body)
rescue ::Faraday::Error => e
  handle_response_error(e)
end

private
Expand Down Expand Up @@ -66,6 +76,11 @@ def conn(multipart: false)
Faraday.new do |f|
f.options[:timeout] = @request_timeout
f.request(:multipart) if multipart
# This raises Faraday::Error on status code 4xx or 5xx.
# These will get captured when making requests so we can propagate the
# error response information properly to end-users
# https://lostisland.github.io/faraday/#/middleware/included/raising-errors?id=raising-errors
f.response :raise_error if @raise_error
end
end

Expand Down Expand Up @@ -111,5 +126,12 @@ def multipart_parameters(parameters)
Faraday::UploadIO.new(value, "", value.path)
end
end

# Translate a rescued Faraday error according to the raise_error setting.
# When enabled, wrap it in the library-owned OpenAI::HTTP::Error (carrying
# the Faraday response hash); otherwise re-raise the original exception so
# existing Timeout/Connection error behaviour is preserved.
def handle_response_error(response_error)
  if @raise_error
    raise Error.new(response_error.message, response_error.response)
  else
    # Bare `raise` re-raises the exception currently being rescued.
    raise
  end
end
end
end
89 changes: 89 additions & 0 deletions lib/openai/http/error.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
module OpenAI
  module HTTP
    # Raised (when the `raise_error` configuration is enabled) for 4xx/5xx
    # responses and other Faraday failures, giving callers a library-owned
    # error class that still exposes the underlying response details.
    #
    # Heavily borrowed from Faraday::Error
    # https://github.com/lostisland/faraday/blob/ea30bd0b543882f1cf26e75ac4e46e0705fa7e68/lib/faraday/error.rb
    class Error < ::OpenAI::Error
      # response - the response Hash (or response-like object) captured when
      #            the error was raised; may be nil.
      # wrapped_exception - the original lower-level exception, if any.
      attr_reader :response, :wrapped_exception

      # exc - Either an Exception, a String message, or a response Hash.
      # response - optional response Hash (see exc_msg_and_response!).
      def initialize(exc = nil, response = nil)
        @wrapped_exception = nil unless defined?(@wrapped_exception)
        @response = nil unless defined?(@response)
        super(exc_msg_and_response!(exc, response))
      end

      # Prefer the wrapped exception's backtrace so the original failure
      # location is not lost behind this wrapper.
      def backtrace
        if @wrapped_exception
          @wrapped_exception.backtrace
        else
          super
        end
      end

      def inspect
        inner = +''
        inner << " wrapped=#{@wrapped_exception.inspect}" if @wrapped_exception
        inner << " response=#{@response.inspect}" if @response
        inner << " #{super}" if inner.empty?
        %(#<#{self.class}#{inner}>)
      end

      # HTTP status of the failed response, or nil when unknown. Supports both
      # response objects (Faraday::Response) and plain hashes.
      def response_status
        return unless @response

        @response.respond_to?(:status) ? @response.status : @response[:status]
      end

      # Headers of the failed response, or nil when unknown.
      def response_headers
        return unless @response

        # BUGFIX: was `@response.response_to?(:headers)` — a typo for
        # `respond_to?` that raised NoMethodError for every response.
        @response.respond_to?(:headers) ? @response.headers : @response[:headers]
      end

      # Body of the failed response, or nil when unknown.
      def response_body
        return unless @response

        @response.respond_to?(:body) ? @response.body : @response[:body]
      end

      protected

      # Pulls out potential parent exception and response hash, storing them in
      # instance variables.
      # exc - Either an Exception, a string message, or a response hash.
      # response - Hash
      #            :status - Optional integer HTTP response status
      #            :headers - String key/value hash of HTTP response header
      #                       values.
      #            :body - Optional string HTTP response body.
      #            :request - Hash
      #                       :method - Symbol with the request HTTP method.
      #                       :url - URI object with the url requested.
      #                       :url_path - String with the url path requested.
      #                       :params - String key/value hash of query params
      #                                 present in the request.
      #                       :headers - String key/value hash of HTTP request
      #                                  header values.
      #                       :body - String HTTP request body.
      #
      # If a subclass has to call this, then it should pass a string message
      # to `super`. See NilStatusError.
      def exc_msg_and_response!(exc, response = nil)
        if @response.nil? && @wrapped_exception.nil?
          @wrapped_exception, msg, @response = exc_msg_and_response(exc, response)
          return msg
        end

        exc.to_s
      end

      # Pulls out potential parent exception and response hash.
      # Returns [wrapped_exception, message, response]; the branches handle an
      # Exception (has #backtrace), a response-like Hash (has #each_key), or a
      # plain message, respectively.
      def exc_msg_and_response(exc, response = nil)
        return [exc, exc.message, response] if exc.respond_to?(:backtrace)

        return [nil, "the server responded with status #{exc[:status]}", exc] if exc.respond_to?(:each_key)

        [nil, exc.to_s, response]
      end
    end
  end
end
9 changes: 7 additions & 2 deletions spec/openai/client/client_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,17 @@
organization_id: "organization_id1",
request_timeout: 60,
uri_base: "https://oai.hconeai.com/",
extra_headers: { "test" => "X-Test" }
extra_headers: { "test" => "X-Test" },
raise_error: true
)
end
# Client with explicit overrides: raise_error: false matches the documented
# default, and organization_id: nil is expected to fall back to the globally
# configured value (see the "Fall back to default" expectation below) —
# TODO confirm against OpenAI::Client's config-merging behaviour.
let!(:c2) do
OpenAI::Client.new(
access_token: "access_token2",
organization_id: nil,
request_timeout: 1,
uri_base: "https://example.com/",
raise_error: false
)
end

Expand All @@ -42,6 +44,7 @@
expect(c0.organization_id).to eq("organization_id0")
expect(c0.request_timeout).to eq(OpenAI::Configuration::DEFAULT_REQUEST_TIMEOUT)
expect(c0.uri_base).to eq(OpenAI::Configuration::DEFAULT_URI_BASE)
expect(c0.raise_error).to eq(false)
expect(c0.send(:headers).values).to include("Bearer #{c0.access_token}")
expect(c0.send(:headers).values).to include(c0.organization_id)
expect(c0.send(:conn).options.timeout).to eq(OpenAI::Configuration::DEFAULT_REQUEST_TIMEOUT)
Expand All @@ -54,6 +57,7 @@
expect(c1.organization_id).to eq("organization_id1")
expect(c1.request_timeout).to eq(60)
expect(c1.uri_base).to eq("https://oai.hconeai.com/")
expect(c1.raise_error).to eq(true)
expect(c1.send(:headers).values).to include(c1.access_token)
expect(c1.send(:conn).options.timeout).to eq(60)
expect(c1.send(:uri, path: "")).to include("https://oai.hconeai.com/")
Expand All @@ -65,6 +69,7 @@
expect(c2.organization_id).to eq("organization_id0") # Fall back to default.
expect(c2.request_timeout).to eq(1)
expect(c2.uri_base).to eq("https://example.com/")
expect(c2.raise_error).to eq(false)
expect(c2.send(:headers).values).to include("Bearer #{c2.access_token}")
expect(c2.send(:headers).values).to include(c2.organization_id)
expect(c2.send(:conn).options.timeout).to eq(1)
Expand Down
60 changes: 60 additions & 0 deletions spec/openai/client/http_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,18 @@
expect(timeout_errors).to include(error.class)
end
end

# Toggles the global raise_error flag for these examples only; the `after`
# hook restores the default so sibling examples are unaffected.
# `described_class::Error` is presumably OpenAI::HTTP::Error — verify against
# the enclosing describe.
context "when raise_error is configured to true" do
let(:timeout_errors) { [described_class::Error] }
before { OpenAI.configuration.raise_error = true }
after { OpenAI.configuration.raise_error = false }

it "times out with OpenAI::HTTP::Error" do
expect { response }.to raise_error do |error|
expect(timeout_errors).to include(error.class)
end
end
end
end

describe ".json_post" do
Expand All @@ -48,6 +60,18 @@
expect(timeout_errors).to include(error.class)
end
end

# Toggles the global raise_error flag for these examples only; the `after`
# hook restores the default so sibling examples are unaffected.
context "when raise_error is configured to true" do
let(:timeout_errors) { [described_class::Error] }
before { OpenAI.configuration.raise_error = true }
after { OpenAI.configuration.raise_error = false }

it "times out with OpenAI::HTTP::Error" do
expect { response }.to raise_error do |error|
expect(timeout_errors).to include(error.class)
end
end
end
end

context "streaming" do
Expand All @@ -63,6 +87,18 @@
expect(timeout_errors).to include(error.class)
end
end

# Toggles the global raise_error flag for these examples only; the `after`
# hook restores the default so sibling examples are unaffected.
context "when raise_error is configured to true" do
let(:timeout_errors) { [described_class::Error] }
before { OpenAI.configuration.raise_error = true }
after { OpenAI.configuration.raise_error = false }

it "times out with OpenAI::HTTP::Error" do
expect { response }.to raise_error do |error|
expect(timeout_errors).to include(error.class)
end
end
end
end
end

Expand All @@ -81,6 +117,18 @@
expect(timeout_errors).to include(error.class)
end
end

# Toggles the global raise_error flag for these examples only; the `after`
# hook restores the default so sibling examples are unaffected.
context "when raise_error is configured to true" do
let(:timeout_errors) { [described_class::Error] }
before { OpenAI.configuration.raise_error = true }
after { OpenAI.configuration.raise_error = false }

it "times out with OpenAI::HTTP::Error" do
expect { response }.to raise_error do |error|
expect(timeout_errors).to include(error.class)
end
end
end
end

describe ".delete" do
Expand All @@ -93,6 +141,18 @@
expect(timeout_errors).to include(error.class)
end
end

# Toggles the global raise_error flag for these examples only; the `after`
# hook restores the default so sibling examples are unaffected.
context "when raise_error is configured to true" do
let(:timeout_errors) { [described_class::Error] }
before { OpenAI.configuration.raise_error = true }
after { OpenAI.configuration.raise_error = false }

it "times out with OpenAI::HTTP::Error" do
expect { response }.to raise_error do |error|
expect(timeout_errors).to include(error.class)
end
end
end
end
end

Expand Down