diff --git a/Gemfile b/Gemfile
index 2ec8963..5e151c3 100644
--- a/Gemfile
+++ b/Gemfile
@@ -1,8 +1,8 @@
-source 'https://rubygems.org'
+source "https://rubygems.org"
 
-ruby '>= 2.7'
+ruby ">= 2.7"
 
-gem 'faraday'
-gem 'faraday-net_http_persistent'
-gem 'rspec'
-gem 'webmock'
+gem "faraday"
+gem "faraday-net_http_persistent"
+gem "rspec"
+gem "webmock"
diff --git a/Gemfile.lock b/Gemfile.lock
index 513c283..991b400 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1,47 +1,47 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.6)
-      public_suffix (>= 2.0.2, < 6.0)
-    bigdecimal (3.1.6)
-    connection_pool (2.4.1)
+    addressable (2.8.7)
+      public_suffix (>= 2.0.2, < 7.0)
+    bigdecimal (3.2.2)
+    connection_pool (2.5.3)
     crack (1.0.0)
       bigdecimal
       rexml
-    diff-lcs (1.5.1)
-    faraday (2.12.0)
-      faraday-net_http (>= 2.0, < 3.4)
+    diff-lcs (1.6.2)
+    faraday (2.13.1)
+      faraday-net_http (>= 2.0, < 3.5)
       json
       logger
-    faraday-net_http (3.3.0)
-      net-http
+    faraday-net_http (3.4.0)
+      net-http (>= 0.5.0)
     faraday-net_http_persistent (2.3.0)
       faraday (~> 2.5)
       net-http-persistent (>= 4.0.4, < 5)
-    hashdiff (1.1.0)
-    json (2.7.2)
-    logger (1.6.1)
-    net-http (0.4.1)
+    hashdiff (1.2.0)
+    json (2.12.2)
+    logger (1.7.0)
+    net-http (0.6.0)
       uri
-    net-http-persistent (4.0.4)
-      connection_pool (~> 2.2)
-    public_suffix (5.0.4)
-    rexml (3.2.6)
-    rspec (3.13.0)
+    net-http-persistent (4.0.6)
+      connection_pool (~> 2.2, >= 2.2.4)
+    public_suffix (6.0.2)
+    rexml (3.4.1)
+    rspec (3.13.1)
       rspec-core (~> 3.13.0)
       rspec-expectations (~> 3.13.0)
       rspec-mocks (~> 3.13.0)
-    rspec-core (3.13.0)
+    rspec-core (3.13.4)
       rspec-support (~> 3.13.0)
-    rspec-expectations (3.13.0)
+    rspec-expectations (3.13.5)
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.13.0)
-    rspec-mocks (3.13.0)
+    rspec-mocks (3.13.5)
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.13.0)
-    rspec-support (3.13.0)
-    uri (0.13.1)
-    webmock (3.22.0)
+    rspec-support (3.13.4)
+    uri (1.0.3)
+    webmock (3.25.1)
       addressable (>= 2.8.0)
       crack (>= 0.3.2)
       hashdiff (>= 0.4.0, < 2.0.0)
diff --git a/README.md b/README.md
index 0e8b0ef..dca7e37 100644
--- a/README.md
+++ b/README.md
@@ -3,73 +3,102 @@
 [![Gem Version](https://badge.fury.io/rb/rubyai.svg)](https://badge.fury.io/rb/rubyai)
 [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/alexshapalov/rubyai/blob/main/LICENSE)
 
-## Use the [OpenAI API 🤖 ](https://openai.com/blog/openai-api/) with Ruby! ❤️
+## Use the [OpenAI API 🤖](https://openai.com/blog/openai-api/) with Ruby! ❤️
 
-Generate text with ChatGPT (Generative Pre-trained Transformer)
+Generate text with ChatGPT, Claude, and Gemini!
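+
+A minimal sketch of the new multi-provider API (assuming you have an API key; full setup is described below):
+
+```ruby
+require "rubyai"
+
+RubyAI.config.openai.configure { |config| config.api = "YOUR API KEY" }
+
+RubyAI::Chat.new("openai", model: "gpt-4").call("Hello!") # => Hash response
+```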
+
+---
+
-# Installation
+## Installation
 
-Add this line to your application's Gemfile:
+Install the latest version via Bundler:
 
-```ruby
-gem "rubyai"
-```
-
-And then execute:
-
-    $ bundle install
+```sh
+$ bundle add rubyai
+```
+
+Then execute:
+
+```sh
+$ bundle install
+```
 
-Or install with:
+Or install manually:
 
-    $ gem install rubyai
+```sh
+$ gem install rubyai
+```
 
-and require with:
+Then require it:
 
 ```ruby
 require "rubyai"
 ```
 
-# Usage
+---
+
+## Usage
 
-- Get your API key from [https://beta.openai.com/account/api-keys](https://beta.openai.com/account/api-keys)
+- Get your API key from:
+
+  - [OpenAI](https://beta.openai.com/account/api-keys)
+
+  - [Anthropic](https://console.anthropic.com)
+
+  - [Gemini](https://aistudio.google.com/apikey)
+
+- If you belong to multiple organizations (OpenAI), get your Organization ID from:
+
+  - [Org Settings](https://beta.openai.com/account/org-settings)
+
-- If you belong to multiple organizations, you can get your Organization ID from [https://beta.openai.com/account/org-settings](https://beta.openai.com/account/org-settings)
 
+---
 
-### Quickstart
+## Quickstart
 
-For a quick test you can pass your token directly to a new client:
+For a quick test, you can pass your token directly to a new client:
 
 ```ruby
-result = RubyAI::Client.new(access_token, messages).call
+result = RubyAI::Client.new(api_key: access_token, messages: messages).call
 ```
 
-### ChatGPT
+---
+
+## ChatGPT
 
-ChatGPT is a conversational-style text generation model.
-You can use it to [generate a response](https://platform.openai.com/docs/api-reference/chat/create) to a sequence of [messages](https://platform.openai.com/docs/guides/chat/introduction):
+ChatGPT is a conversational-style text generation model. You can use it to generate a response to a sequence of messages:
 
 ```ruby
 api_key = "YOUR API KEY"
 messages = "Who is the best chess player in history?"
 
-result = RubyAI::Client.new(api_key, messages, model: "gpt-4").call
+result = RubyAI::Client.new(api_key: api_key, messages: messages, model: "gpt-4").call
 
 puts result.dig("choices", 0, "message", "content")
-
-# => As an AI language model, I do not have personal opinions, but according to historical records, Garry Kasparov is often considered as one of the best chess players in history. Other notable players include Magnus Carlsen, Bobby Fischer, and Jose Capablanca.
 ```
 
-You can also pass client variables using the configuration file.
-Create configruation file like on example:
+**Example output:**
+
+> As an AI language model, I do not have personal opinions, but according to historical records, Garry Kasparov is often considered as one of the best chess players in history. Other notable players include Magnus Carlsen, Bobby Fischer, and Jose Capablanca.
+
+---
+
+> **TODO:** Passing a prebuilt `Configuration` object to the client is not supported yet. The planned API looks like this:
 
 ```ruby
-configuration = RubyAI::Configuration.new("YOUR API KEY", "Who is the best chess player in history?")
+configuration = RubyAI::Configuration.new(
+  api_key: "YOUR API KEY",
+  messages: "Who is the best chess player in history?"
+)
 
 client = RubyAI::Client.new(configuration)
 result = client.call
 
 puts result.dig("choices", 0, "message", "content")
 ```
 
-Also (mostly) if you are using Rails you can use configure method:
+---
+
+If you're using Rails, you can use the `configure` block:
+
 ```ruby
 RubyAI.configure do |config|
   config.api_key = "YOUR API KEY"
@@ -78,30 +107,126 @@ RubyAI.configure do |config|
 end
 ```
 
-## Models
+---
 
-We support all popular GPT models:
+### Configuration
 
-gpt-4-turbo: A powerful variant of GPT-4 optimized for efficiency and speed, perfect for high-demand tasks.
+Our gem uses separate configurations for each provider:
 
-gpt-4o-mini: A streamlined version of GPT-4, designed to provide a balance between performance and resource efficiency.
+```ruby
+# For OpenAI
+RubyAI.config.openai.configure do |config|
+  config.api = "your-api"
+  config.model = "o1-mini"
+  config.temperature = 0.75
+end
 
-o1-mini: A compact, yet effective model that is well-suited for lightweight tasks.
+# For Anthropic
+RubyAI.config.anthropic.configure do |config|
+  config.api = "your-api"
+  config.model = "claude-2"
+  config.temperature = 0.75
+  config.max_tokens = 1000
+end
 
-o1-preview: A preview version of the o1 model, offering insights into upcoming advancements and features.
+# For Gemini
+RubyAI.config.gemini.configure do |config|
+  config.api = "your-api"
+  config.model = "gemini-1.5-pro"
+  config.temperature = 0.75
+  config.max_tokens = 1000
+end
+```
+
+---
+
+### Chat
+
+After configuration, you can chat with models using the `Chat` class:
+
+```ruby
+claude2 = RubyAI::Chat.new("anthropic", model: "claude-2")
+claude2.call("Hello world") # => Hash response
+
+# Or
+
+gpt = RubyAI::Chat.new("openai", model: "gpt-4", temperature: 1)
+gpt.call("Hello world!") # => Hash response
+```
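+
+Gemini works the same way; this sketch assumes you have set `RubyAI.config.gemini.api` as shown above:
+
+```ruby
+gemini = RubyAI::Chat.new("gemini", model: "gemini-1.5-pro")
+gemini.call("Hello world") # => Hash response
+```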
+
+---
+
+### Supported Models
+
+We support the most popular models from each provider:
+
+```ruby
+p RubyAI.models
+
+{
+  "openai" => {
+    "gpt-3.5-turbo" => "gpt-3.5-turbo",
+    "gpt-4" => "gpt-4",
+    "gpt-4-32k" => "gpt-4-32k",
+    "gpt-4-turbo" => "gpt-4-turbo",
+    "gpt-4o-mini" => "gpt-4o-mini",
+    "o1-mini" => "o1-mini",
+    "o1-preview" => "o1-preview",
+    "text-davinci-003" => "text-davinci-003"
+  },
+  "anthropic" => {
+    "claude-2" => "claude-2",
+    "claude-instant-100k" => "claude-instant-100k",
+    "claude-1" => "claude-1",
+    "claude-1.3" => "claude-1.3",
+    "claude-1.3-sonnet" => "claude-1.3-sonnet",
+    "claude-1.3-sonnet-100k" => "claude-1.3-sonnet-100k"
+  },
+  "gemini" => {
+    "gemini-1.5-pro" => "gemini-1.5-pro",
+    "gemini-1.5-flash" => "gemini-1.5-flash",
+    "gemini-1.0-pro" => "gemini-1.0-pro"
+  }
+}
+```
+
+---
+
+## TODO
+
+- Support Gemini configuration via `configure` block
+- Add support for more LLM providers
+- Stream responses
+
+---
 
 ## Development
 
-After checking out the repo, run `bin/setup` to install dependencies. You can run `bin/console` for an interactive prompt that will allow you to experiment.
+After checking out the repo, install dependencies with:
+
+```sh
+bin/setup
+```
+
+For an interactive prompt that lets you experiment, run:
 
-To install this gem onto your local machine, run `bundle exec rake install`.
+```sh
+bin/console
+```
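+
+Inside the console you can exercise the gem directly, for example (a sketch; the key is a placeholder):
+
+```ruby
+RubyAI.configure { |config| config.openai.api = "YOUR API KEY" }
+RubyAI.models.keys # => ["openai", "anthropic", "gemini"]
+```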
+
+To install this gem onto your local machine:
+
+```sh
+bundle exec rake install
+```
+
+---
 
 ## Contributing
 
-Bug reports and pull requests are welcome on GitHub at . This project is intended to be a safe, welcoming space for collaboration, and contributors.
+Bug reports and pull requests are welcome on GitHub at [https://github.com/alexshapalov/rubyai](https://github.com/alexshapalov/rubyai).
+This project is intended to be a safe, welcoming space for collaboration.
+
+---
 
 ## License
 
-The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
+This gem is open source under the [MIT License](https://opensource.org/licenses/MIT).
diff --git a/lib/rubyai.rb b/lib/rubyai.rb
index 211a3bc..0712424 100644
--- a/lib/rubyai.rb
+++ b/lib/rubyai.rb
@@ -1,12 +1,30 @@
-require 'faraday'
-require 'faraday/net_http_persistent'
-require 'json'
+require "faraday"
+require "faraday/net_http_persistent"
+require "json"
 
-require_relative "rubyai/client"
+require_relative "rubyai/providers/base"
+require_relative "rubyai/providers/openai"
+require_relative "rubyai/providers/anthropic"
+require_relative "rubyai/providers/gemini"
+require_relative "rubyai/provider"
 require_relative "rubyai/configuration"
 require_relative "rubyai/http"
+require_relative "rubyai/client"
+require_relative "rubyai/chat"
 require_relative "rubyai/version"
 
 module RubyAI
   class Error < StandardError; end
+
+  def self.models
+    RubyAI::Provider.models
+  end
+
+  def self.configure
+    yield config
+  end
+
+  def self.config(config = {})
+    @config ||= Configuration.new(config)
+  end
 end
diff --git a/lib/rubyai/chat.rb b/lib/rubyai/chat.rb
new file mode 100644
index 0000000..cf8f982
--- /dev/null
+++ b/lib/rubyai/chat.rb
@@ -0,0 +1,43 @@
+module RubyAI
+  class Chat
+    attr_accessor :provider, :model, :temperature
+
+    def initialize(provider, model: nil, temperature: 0.75)
+      @provider = provider || RubyAI.config.default_provider
+      @model = model
+      @temperature = temperature
+    end
+
+    def call(messages)
+      raise ArgumentError, "Messages cannot be empty" if messages.nil? || messages.empty?
+
+      body = HTTP.build_body(messages, @provider, @model, @temperature)
+      headers = HTTP.build_headers(provider)
+
+      response = connection.post do |req|
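+        # PROVIDERS has a custom lookup (defined in lib/rubyai/provider.rb):
+        # for Gemini it appends the model path and API key to the URL;
+        # other providers use their static endpoint.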
+        req.url Provider::PROVIDERS[@provider, @model]
+        req.headers.merge!(headers)
+        req.body = body.to_json
+      end
+
+      JSON.parse(response.body)
+    end
+
+    private
+
+    def connection
+      @connection ||= Faraday.new do |faraday|
+        faraday.adapter Faraday.default_adapter
+        faraday.headers["Content-Type"] = "application/json"
+      end
+    rescue Faraday::Error => e
+      raise "Connection error: #{e.message}"
+    rescue StandardError => e
+      raise "An unexpected error occurred: #{e.message}"
+    end
+  end
+end
diff --git a/lib/rubyai/client.rb b/lib/rubyai/client.rb
index 6cbc096..2e25fb5 100644
--- a/lib/rubyai/client.rb
+++ b/lib/rubyai/client.rb
@@ -1,16 +1,25 @@
 module RubyAI
   class Client
-    attr_reader :configuration
+    attr_accessor :configuration
 
-    def initialize(config_hash = {})
-      @configuration = Configuration.new(config_hash)
+    def initialize(config = {})
+      @configuration = RubyAI.config(config)
     end
 
     def call
+      messages = configuration.messages
+      provider = configuration.provider
+      model = configuration.model
+      temperature = configuration.temperature
+
+      raise ArgumentError, "Messages cannot be empty" if messages.nil? || messages.empty?
+
+      body = HTTP.build_body(messages, provider, model, temperature)
+      headers = HTTP.build_headers(provider)
+
       response = connection.post do |req|
-        req.url Configuration::BASE_URL
-        req.headers.merge!(HTTP.build_headers(configuration.api_key))
-        req.body = HTTP.build_body(configuration.messages, configuration.model, configuration.temperature).to_json
+        req.url Provider::PROVIDERS[provider, model]
+        req.headers.merge!(headers)
+        req.body = body.to_json
       end
 
       JSON.parse(response.body)
@@ -19,9 +28,16 @@ def call
     private
 
     def connection
       @connection ||= Faraday.new do |faraday|
         faraday.adapter Faraday.default_adapter
+        faraday.headers["Content-Type"] = "application/json"
       end
+    rescue Faraday::Error => e
+      raise "Connection error: #{e.message}"
+    rescue StandardError => e
+      raise "An unexpected error occurred: #{e.message}"
     end
   end
 end
diff --git a/lib/rubyai/configuration.rb b/lib/rubyai/configuration.rb
index ed0bc1b..2f08b8c 100644
--- a/lib/rubyai/configuration.rb
+++ b/lib/rubyai/configuration.rb
@@ -1,34 +1,50 @@
 module RubyAI
   class Configuration
-    BASE_URL = "https://api.openai.com/v1/chat/completions"
+    # providers config accessors
+    attr_accessor :openai,
+                  :anthropic,
+                  :gemini,
+                  :default_provider,
+                  # default api config accessors
+                  :api_key,
+                  :model,
+                  :messages,
+                  :temperature,
+                  :provider
 
-    MODELS = {
-      "gpt-4" => "gpt-4",
-      "gpt-4-32k" => "gpt-4-32k",
-      "gpt-4-turbo" => "gpt-4-turbo",
-      "gpt-4o-mini" => "gpt-4o-mini",
-      "o1-mini" => "o1-mini",
-      "o1-preview" => "o1-preview",
-      "text-davinci-003" => "text-davinci-003"
-    }
+    def initialize(config = nil)
+      @openai ||= Providers::OpenAI.new
+      @anthropic ||= Providers::Anthropic.new
+      @gemini ||= Providers::Gemini.new
+      @default_provider ||= "openai" # used by Chat when no provider is given
+      return if config.nil? || config.empty?
 
-    DEFAULT_MODEL = "gpt-3.5-turbo"
+
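+      # Infer the provider from the requested model; default to OpenAI when
+      # no model is given.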
+ "openai" + else + Provider.models.find do |_provider, models| + models.include?(config[:model]) + end.first + end - attr_accessor :api_key, :model, :messages, :temperature + raise ArgumentError, "Unknown model: #{config[:model]}" unless provider_key - def initialize(config = {}) - @api_key = config[:api_key] - @model = config.fetch(:model, DEFAULT_MODEL) - @messages = config.fetch(:messages, nil) - @temperature = config.fetch(:temperature, 0.7) + @model = config.fetch(:model, Provider.models["openai"]["gpt-3.5-turbo"]) + provider = send(provider_key) + provider.api = config[:api_key] + @api_key = provider.api + provider.messages = config[:messages] + @messages = provider.messages + @provider = provider_key + provider.temperature = config.fetch(:temperature, 0.7) + @temperature = provider.temperature end - end - def self.configuration - @configuration ||= Configuration.new - end + def self.config + @config ||= RubyAI.config({}) + end - def self.configure - yield(configuration) + def self.configure + yield(config) + end end end diff --git a/lib/rubyai/http.rb b/lib/rubyai/http.rb index cd73806..967c031 100644 --- a/lib/rubyai/http.rb +++ b/lib/rubyai/http.rb @@ -1,20 +1,13 @@ module RubyAI module HTTP - extend self + module_function - def build_body(messages, model, temperature) - { - 'model': Configuration::MODELS[model], - 'messages': [{ "role": "user", "content": messages }], - 'temperature': temperature - } + def build_body(messages, provider, model, temperature) + RubyAI.config.send(provider).build_http_body(messages, model, temperature) end - def build_headers(api_key) - { - 'Content-Type': 'application/json', - 'Authorization': "Bearer #{api_key}" - } + def build_headers(provider) + RubyAI.config.send(provider).build_http_headers(provider) end end end diff --git a/lib/rubyai/provider.rb b/lib/rubyai/provider.rb new file mode 100644 index 0000000..8eee905 --- /dev/null +++ b/lib/rubyai/provider.rb @@ -0,0 +1,23 @@ +module RubyAI + module Provider + module_function + + PROVIDERS = { + "openai" => "https://api.openai.com/v1/chat/completions", + "anthropic" => "https://api.anthropic.com/v1/chat/completions", + "gemini" => "https://generativelanguage.googleapis.com/v1beta/models" + } + + def PROVIDERS.[](provider, model = nil) + return super(provider) unless !model.nil? 
&& provider == "gemini" + + "#{super(provider)}/#{model}:generateContent?key=#{RubyAI.config.gemini.api}" + end + + def models + PROVIDERS.to_h do |provider, _url| + [provider, RubyAI.config.send(provider).models] + end + end + end +end diff --git a/lib/rubyai/providers/anthropic.rb b/lib/rubyai/providers/anthropic.rb new file mode 100644 index 0000000..945cc25 --- /dev/null +++ b/lib/rubyai/providers/anthropic.rb @@ -0,0 +1,51 @@ +module RubyAI + module Providers + class Anthropic < Base + attr_accessor :api, :messages, :temperature, :max_tokens + + def initialize(api: nil, messages: nil, temperature: 0.7, model: "claude-2") + @api = api + @messages = messages + @temperature = temperature + @model = model + end + + def models + { + "claude-2" => "claude-2", + "claude-instant-100k" => "claude-instant-100k", + "claude-1" => "claude-1", + "claude-1.3" => "claude-1.3", + "claude-1.3-sonnet" => "claude-1.3-sonnet", + "claude-1.3-sonnet-100k" => "claude-1.3-sonnet-100k" + }.freeze + end + + def build_http_body(messages = nil, model = nil, temperature = nil) + { + "model" => RubyAI::Provider.models["anthropic"][model || @model], + "max_tokens" => 1024, # Required parameter for Anthropic API + "messages" => format_messages_for_antropic(messages || @messages), + "temperature" => temperature || @temperature + } + end + + def build_http_headers(_provider) + { + "x-api-key" => RubyAI.config.anthropic.api, + "anthropic-version" => "2023-06-01" + } + end + + def format_messages_for_antropic(messages) + # Messages should be an array of message objects + # Each message needs 'role' (either 'user' or 'assistant') and 'content' + if messages.is_a?(String) + [{ "role" => "user", "content" => messages }] + else + messages + end + end + end + end +end diff --git a/lib/rubyai/providers/base.rb b/lib/rubyai/providers/base.rb new file mode 100644 index 0000000..4e5b9ab --- /dev/null +++ b/lib/rubyai/providers/base.rb @@ -0,0 +1,9 @@ +module RubyAI + module Providers + class Base + def configure + yield self + end + end + end +end diff --git a/lib/rubyai/providers/gemini.rb b/lib/rubyai/providers/gemini.rb new file mode 100644 index 0000000..de955d2 --- /dev/null +++ b/lib/rubyai/providers/gemini.rb @@ -0,0 +1,47 @@ +module RubyAI + module Providers + class Gemini < Base + attr_accessor :api, :messages, :temperature, :max_tokens, :model + + def initialize(api: nil, messages: nil, temperature: 0.7, max_tokens: 1000) + @api = api + @messages = messages + @temperature = temperature + @max_tokens = max_tokens + end + + def models + { + "gemini-1.5-pro" => "gemini-1.5-pro", + "gemini-1.5-flash" => "gemini-1.5-flash", + "gemini-1.0-pro" => "gemini-1.0-pro" + } + end + + def build_http_body(messages = nil, _model, temperature, max_tokens: 1000) + { + contents: [ + { + role: "user", + parts: [ + { + text: messages || @messages + } + ] + } + ], + generationConfig: { + temperature: temperature || @temperature, + maxOutputTokens: @max_tokens || max_tokens, + topP: 0.8, + topK: 10 + } + } + end + + def build_http_headers(_provider) + {} + end + end + end +end diff --git a/lib/rubyai/providers/openai.rb b/lib/rubyai/providers/openai.rb new file mode 100644 index 0000000..0d3675f --- /dev/null +++ b/lib/rubyai/providers/openai.rb @@ -0,0 +1,40 @@ +module RubyAI + module Providers + class OpenAI < Base + attr_accessor :api, :messages, :temperature, :model + + def initialize(api: nil, messages: nil, temperature: 0.7) + @api = api + @messages = messages + @temperature = temperature + end + + def models + { + "gpt-3.5-turbo" => 
"gpt-3.5-turbo", + "gpt-4" => "gpt-4", + "gpt-4-32k" => "gpt-4-32k", + "gpt-4-turbo" => "gpt-4-turbo", + "gpt-4o-mini" => "gpt-4o-mini", + "o1-mini" => "o1-mini", + "o1-preview" => "o1-preview", + "text-davinci-003" => "text-davinci-003" + } + end + + def build_http_body(messages = nil, model = "gpt-3.5-turbo", temperature = nil) + { + model: RubyAI::Provider.models["openai"][model], + messages: [{ role: "user", content: messages || @messages }], + temperature: temperature || @temperature + } + end + + def build_http_headers(_provider) + { + Authorization: "Bearer #{@api || RubyAI.config.openai.api}" + } + end + end + end +end diff --git a/rubyai.gemspec b/rubyai.gemspec index e9ebaff..326f938 100644 --- a/rubyai.gemspec +++ b/rubyai.gemspec @@ -32,7 +32,8 @@ Gem::Specification.new do |s| # Metadata information (optional but useful for gem hosts) s.metadata = { "source_code_uri" => "https://github.com/alexshapalov/rubyai", - "changelog_uri" => "https://github.com/alexshapalov/rubyai/CHANGELOG.md", - "documentation_uri" => "https://github.com/alexshapalov/rubyai#readme" + "changelog_uri" => "https://github.com/alexshapalov/rubyai/CHANGELOG.md", + "documentation_uri" => "https://github.com/alexshapalov/rubyai#readme", + "rubygems_mfa_required" => "true" } end diff --git a/spec/client_spec.rb b/spec/client_spec.rb deleted file mode 100644 index f74c472..0000000 --- a/spec/client_spec.rb +++ /dev/null @@ -1,24 +0,0 @@ -require 'webmock/rspec' -require_relative '../lib/rubyai/client.rb' - -RSpec.describe RubyAI::Client do - let(:api_key) { 'your_api_key' } - let(:messages) { 'Hello, how are you?' } - let(:temperature) { 0.7 } - let(:model) { 'gpt-3.5-turbo' } - let(:client) { described_class.new(api_key: api_key, messages: messages, temperature: temperature, model: model) } - - describe '#call' do - let(:response_body) { { 'completion' => 'This is a response from the model.' } } - let(:status) { 200 } - - before do - stub_request(:post, RubyAI::Configuration::BASE_URL) - .to_return(status: status, body: response_body.to_json, headers: { 'Content-Type' => 'application/json' }) - end - - it 'returns parsed JSON response when passing through client directly' do - expect(client.call).to eq(response_body) - end - end -end diff --git a/spec/configuration_spec.rb b/spec/configuration_spec.rb deleted file mode 100644 index e9298f1..0000000 --- a/spec/configuration_spec.rb +++ /dev/null @@ -1,33 +0,0 @@ -require 'webmock/rspec' -require_relative '../lib/rubyai/client.rb' - -RSpec.describe RubyAI::Client do - let(:api_key) { 'your_api_key' } - let(:messages) { 'Hello, how are you?' } - let(:temperature) { 0.7 } - let(:model) { 'gpt-3.5-turbo' } - - before do - RubyAI.configure do |config| - config.api_key = api_key - config.messages = messages - end - end - - describe '#call' do - let(:response_body) { { 'choices' => [{ 'message' => { 'content' => 'This is a response from the model.' 
} }] } } - let(:status) { 200 } - - before do - stub_request(:post, RubyAI::Configuration::BASE_URL) - .to_return(status: status, body: response_body.to_json, headers: { 'Content-Type' => 'application/json' }) - end - - it 'returns parsed JSON response when passing through client via configuration' do - configuration = { api_key: RubyAI.configuration.api_key, messages: RubyAI.configuration.messages } - client = described_class.new(configuration) - result = client.call - expect(result.dig('choices', 0, 'message', 'content')).to eq('This is a response from the model.') - end - end -end diff --git a/spec/rubyai/chat_spec.rb b/spec/rubyai/chat_spec.rb new file mode 100644 index 0000000..2bce5ef --- /dev/null +++ b/spec/rubyai/chat_spec.rb @@ -0,0 +1,118 @@ +require_relative "../../lib/rubyai" +require "webmock/rspec" + +# spec/ruby_ai/chat_spec.rb + +require "spec_helper" + +RSpec.describe RubyAI::Chat do + let(:provider) { "openai" } + let(:model) { "gpt-4" } + let(:temperature) { 0.9 } + let(:messages) { ["Hello"] } + let(:response_body) { { "reply" => "Hi!" }.to_json } + + subject(:chat) { described_class.new(provider, model: model, temperature: temperature) } + + describe "#initialize" do + it "sets the provider, model, and temperature" do + expect(chat.provider).to eq(provider) + expect(chat.model).to eq(model) + expect(chat.temperature).to eq(temperature) + end + + it "uses default provider if none is given" do + allow(RubyAI).to receive_message_chain(:config, + :default_provider).and_return("default_provider") + chat_instance = described_class.new(nil) + expect(chat_instance.provider).to eq("default_provider") + end + end + + describe "#call" do + context "when messages are nil or empty" do + it "raises ArgumentError" do + expect { chat.call(nil) }.to raise_error(ArgumentError, "Messages cannot be empty") + expect { chat.call([]) }.to raise_error(ArgumentError, "Messages cannot be empty") + end + end + + context "when messages are valid" do + let(:fake_connection) { instance_double(Faraday::Connection) } + let(:fake_response) { instance_double(Faraday::Response, body: response_body) } + let(:fake_connection) do + instance_double(Faraday::Connection).tap do |conn| + allow(conn).to receive(:post).and_return(fake_response) + allow(conn).to receive(:headers).and_return({}) + allow(conn).to receive(:adapter) + end + end + + let(:url) { "https://fake.provider/api" } + + before do + allow(RubyAI::HTTP).to receive(:build_body).with(messages, provider, model, + temperature).and_return({ body: "data" }) + allow(RubyAI::HTTP).to receive(:build_headers).with(provider).and_return({ "Authorization" => "Bearer token" }) + + stub_const("RubyAI::Configuration::PROVIDERS", { [provider, model] => url }) + + allow(Faraday).to receive(:new).and_return(fake_connection) + allow(fake_connection).to receive(:headers).and_return({}) + allow(fake_connection).to receive(:adapter) + + allow(fake_connection).to receive(:post).and_return(fake_response) + end + + it "returns parsed JSON response" do + result = chat.call(messages) + expect(result).to eq({ "reply" => "Hi!" 
}) + end + end + + context "when Faraday connection fails" do + before do + allow(Faraday).to receive(:new).and_raise(Faraday::ConnectionFailed.new("no internet")) + end + + it "raises a connection error" do + expect { chat.call(messages) }.to raise_error("Connection error: no internet") + end + end + + context "when JSON parsing fails" do + let(:bad_response) { instance_double(Faraday::Response, body: "not_json") } + + before do + stub_const("RubyAI::Configuration::PROVIDERS", { [provider, model] => "fake_url" }) + + allow(RubyAI::HTTP).to receive(:build_body).and_return({}) + allow(RubyAI::HTTP).to receive(:build_headers).and_return({}) + + allow(Faraday).to receive(:new).and_return(double( + headers: {}, + adapter: nil, + post: bad_response + )) + + allow(JSON).to receive(:parse).and_raise(JSON::ParserError.new("unexpected token")) + end + + it "raises a JSON parse error" do + expect { chat.call(messages) }.to raise_error(JSON::ParserError, "unexpected token") + end + end + + context "when any other error occurs" do + before do + allow(Faraday).to receive(:new).and_raise(StandardError.new("something went wrong")) + end + + it "raises a generic error" do + expect do + chat.call(messages) + end.to raise_error("An unexpected error occurred: something went wrong") + end + end + end +end diff --git a/spec/rubyai/configuration_spec.rb b/spec/rubyai/configuration_spec.rb new file mode 100644 index 0000000..ccdfa15 --- /dev/null +++ b/spec/rubyai/configuration_spec.rb @@ -0,0 +1,42 @@ +require_relative "../../lib/rubyai" +require_relative "../../lib/rubyai/provider" + +require "spec_helper" + +RSpec.describe RubyAI::Configuration do + describe "#initialize" do + let(:config) { described_class.new } + + it "initializes OpenAI provider" do + expect(config.openai).to be_a(RubyAI::Providers::OpenAI) + end + + it "initializes Anthropic provider" do + expect(config.anthropic).to be_a(RubyAI::Providers::Anthropic) + end + + it "initializes Gemini provider" do + expect(config.gemini).to be_a(RubyAI::Providers::Gemini) + end + end + + describe "RubyAI.configuration" do + it "returns a singleton configuration object" do + config1 = RubyAI.config + config2 = RubyAI.config + expect(config1).to be_a(described_class) + expect(config1).to equal(config2) + end + end + + describe "RubyAI.configure" do + it "yields configuration object for custom setup" do + RubyAI.configure do |config| + expect(config).to be_a(described_class) + config.openai.api = "custom_key" + end + + expect(RubyAI.config.openai.api).to eq("custom_key") + end + end +end diff --git a/spec/rubyai/http_spec.rb b/spec/rubyai/http_spec.rb new file mode 100644 index 0000000..1600d17 --- /dev/null +++ b/spec/rubyai/http_spec.rb @@ -0,0 +1,224 @@ +require_relative "../../lib/rubyai/http" +require "spec_helper" + +RSpec.describe RubyAI::HTTP do + describe ".build_body" do + let(:messages) { [{ role: "user", content: "Hello" }] } + let(:provider) { :openai } + let(:model) { "gpt-3.5-turbo" } + let(:temperature) { 0.7 } + let(:mock_config) { double("config") } + let(:mock_provider_config) { double("provider_config") } + let(:expected_body) { { messages: messages, model: model, temperature: temperature } } + + before do + allow(RubyAI).to receive(:config).and_return(mock_config) + allow(mock_config).to receive(:send).with(provider).and_return(mock_provider_config) + end + + context "when provider configuration exists" do + it "calls build_http_body on the provider configuration" do + expect(mock_provider_config).to receive(:build_http_body) + 
+          .with(messages, model, temperature)
+          .and_return(expected_body)
+
+        result = described_class.build_body(messages, provider, model, temperature)
+        expect(result).to eq(expected_body)
+      end
+
+      it "passes all parameters correctly" do
+        expect(mock_provider_config).to receive(:build_http_body)
+          .with(messages, model, temperature)
+
+        described_class.build_body(messages, provider, model, temperature)
+      end
+    end
+
+    context "with different providers" do
+      it "works with anthropic provider" do
+        anthropic_provider = :anthropic
+        allow(mock_config).to receive(:send).with(anthropic_provider).and_return(mock_provider_config)
+        expect(mock_provider_config).to receive(:build_http_body)
+          .with(messages, model, temperature)
+
+        described_class.build_body(messages, anthropic_provider, model, temperature)
+      end
+
+      it "works with custom provider" do
+        custom_provider = :custom
+        allow(mock_config).to receive(:send).with(custom_provider).and_return(mock_provider_config)
+        expect(mock_provider_config).to receive(:build_http_body)
+          .with(messages, model, temperature)
+
+        described_class.build_body(messages, custom_provider, model, temperature)
+      end
+    end
+
+    context "with different parameter types" do
+      it "handles string provider" do
+        string_provider = "openai"
+        allow(mock_config).to receive(:send).with(string_provider).and_return(mock_provider_config)
+        expect(mock_provider_config).to receive(:build_http_body)
+          .with(messages, model, temperature)
+
+        described_class.build_body(messages, string_provider, model, temperature)
+      end
+
+      it "handles different temperature values" do
+        [0.0, 0.5, 1.0, 1.5].each do |temp|
+          expect(mock_provider_config).to receive(:build_http_body)
+            .with(messages, model, temp)
+
+          described_class.build_body(messages, provider, model, temp)
+        end
+      end
+
+      it "handles different model types" do
+        models = %w[gpt-4 claude-3 llama-2]
+        models.each do |test_model|
+          expect(mock_provider_config).to receive(:build_http_body)
+            .with(messages, test_model, temperature)
+
+          described_class.build_body(messages, provider, test_model, temperature)
+        end
+      end
+    end
+
+    context "when provider configuration raises an error" do
+      it "propagates the error from provider configuration" do
+        allow(mock_provider_config).to receive(:build_http_body)
+          .and_raise(StandardError, "Provider configuration error")
+
+        expect do
+          described_class.build_body(messages, provider, model, temperature)
+        end.to raise_error(StandardError, "Provider configuration error")
+      end
+    end
+  end
+
+  describe ".build_headers" do
+    let(:provider) { :openai }
+    let(:mock_config) { double("config") }
+    let(:mock_provider_config) { double("provider_config") }
+    let(:expected_headers) do
+      { "Authorization" => "Bearer token", "Content-Type" => "application/json" }
+    end
+
+    before do
+      allow(RubyAI).to receive(:config).and_return(mock_config)
+      allow(mock_config).to receive(:send).with(provider).and_return(mock_provider_config)
+    end
+
+    context "when provider configuration exists" do
+      it "calls build_http_headers on the provider configuration" do
+        expect(mock_provider_config).to receive(:build_http_headers)
+          .with(provider)
+          .and_return(expected_headers)
+
+        result = described_class.build_headers(provider)
+        expect(result).to eq(expected_headers)
+      end
+
+      it "passes the provider parameter correctly" do
+        expect(mock_provider_config).to receive(:build_http_headers)
+          .with(provider)
+
+        described_class.build_headers(provider)
+      end
+    end
+
+    context "with different providers" do
+      it "works with anthropic provider" do
+        anthropic_provider = :anthropic
+        allow(mock_config).to receive(:send).with(anthropic_provider).and_return(mock_provider_config)
+        expect(mock_provider_config).to receive(:build_http_headers)
+          .with(anthropic_provider)
+
+        described_class.build_headers(anthropic_provider)
+      end
+
+      it "works with string provider" do
+        string_provider = "openai"
+        allow(mock_config).to receive(:send).with(string_provider).and_return(mock_provider_config)
+        expect(mock_provider_config).to receive(:build_http_headers)
+          .with(string_provider)
+
+        described_class.build_headers(string_provider)
+      end
+    end
+
+    context "when provider configuration raises an error" do
+      it "propagates the error from provider configuration" do
+        allow(mock_provider_config).to receive(:build_http_headers)
+          .and_raise(ArgumentError, "Invalid provider")
+
+        expect do
+          described_class.build_headers(provider)
+        end.to raise_error(ArgumentError, "Invalid provider")
+      end
+    end
+  end
+
+  describe "module structure" do
+    it "is a module" do
+      expect(RubyAI::HTTP).to be_a(Module)
+    end
+
+    it "exposes its helpers as module functions" do
+      expect(RubyAI::HTTP.methods).to include(:build_body, :build_headers)
+    end
+
+    it "has the expected public methods" do
+      expect(described_class).to respond_to(:build_body)
+      expect(described_class).to respond_to(:build_headers)
+    end
+  end
+
+  describe "integration scenarios" do
+    let(:mock_config) { double("config") }
+    let(:mock_openai_config) { double("openai_config") }
+    let(:mock_anthropic_config) { double("anthropic_config") }
+
+    before do
+      allow(RubyAI).to receive(:config).and_return(mock_config)
+      allow(mock_config).to receive(:send).with(:openai).and_return(mock_openai_config)
+      allow(mock_config).to receive(:send).with(:anthropic).and_return(mock_anthropic_config)
+    end
+
+    it "can build body and headers for the same provider" do
+      messages = [{ role: "user", content: "Test" }]
+      provider = :openai
+      model = "gpt-3.5-turbo"
+      temperature = 0.8
+
+      expect(mock_openai_config).to receive(:build_http_body)
+        .with(messages, model, temperature)
+        .and_return({ messages: messages })
+
+      expect(mock_openai_config).to receive(:build_http_headers)
+        .with(provider)
+        .and_return({ "Authorization" => "Bearer test" })
+
+      body = described_class.build_body(messages, provider, model, temperature)
+      headers = described_class.build_headers(provider)
+
+      expect(body).to eq({ messages: messages })
+      expect(headers).to eq({ "Authorization" => "Bearer test" })
+    end
+
+    it "can handle different providers in the same test" do
+      messages = [{ role: "user", content: "Test" }]
+
+      expect(mock_openai_config).to receive(:build_http_body)
+        .and_return({ openai: "body" })
+      expect(mock_anthropic_config).to receive(:build_http_body)
+        .and_return({ anthropic: "body" })
+
+      openai_body = described_class.build_body(messages, :openai, "gpt-4", 0.7)
+      anthropic_body = described_class.build_body(messages, :anthropic, "claude-3", 0.5)
+
+      expect(openai_body).to eq({ openai: "body" })
+      expect(anthropic_body).to eq({ anthropic: "body" })
+    end
+  end
+end
diff --git a/spec/rubyai/provider_spec.rb b/spec/rubyai/provider_spec.rb
new file mode 100644
index 0000000..b014c9a
--- /dev/null
+++ b/spec/rubyai/provider_spec.rb
@@ -0,0 +1,55 @@
+require_relative "../../lib/rubyai/providers/base"
+require_relative "../../lib/rubyai/providers/openai"
+require_relative "../../lib/rubyai/providers/anthropic"
+require_relative "../../lib/rubyai/providers/gemini"
+require_relative "../../lib/rubyai/provider"
+
+require "spec_helper"
+
+RSpec.describe RubyAI::Provider do
+  describe "::PROVIDERS" do
+    it "returns URL for openai and anthropic without model" do
+      expect(described_class::PROVIDERS["openai"]).to eq("https://api.openai.com/v1/chat/completions")
+      expect(described_class::PROVIDERS["anthropic"]).to eq("https://api.anthropic.com/v1/messages")
+    end
+
+    it "returns extended Gemini URL with model and API key" do
+      providers_hash = described_class::PROVIDERS
+
+      # Stub API key
+      fake_api_key = "dummy_key"
+      allow(RubyAI).to receive_message_chain(:config, :gemini, :api).and_return(fake_api_key)
+
+      gemini_provider = providers_hash["gemini", "gemini-1.5-pro"]
+      expect(gemini_provider).to eq(
+        "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro:generateContent?key=#{fake_api_key}"
+      )
+    end
+  end
+
+  describe ".models" do
+    before do
+      stub_const("RubyAI::Provider::PROVIDERS", {
+        "openai" => "https://openai.test",
+        "gemini" => "https://gemini.test"
+      })
+
+      fake_openai = double("OpenAI", models: { "gpt-test" => "gpt-test" })
+      fake_gemini = double("Gemini", models: { "gemini-test" => "gemini-test" })
+      fake_config = double("config")
+
+      allow(RubyAI).to receive(:config).and_return(fake_config)
+      allow(fake_config).to receive(:send).with("openai").and_return(fake_openai)
+      allow(fake_config).to receive(:send).with("gemini").and_return(fake_gemini)
+    end
+
+    it "returns the models hash for each provider" do
+      expect(described_class.models["openai"]).to eq({ "gpt-test" => "gpt-test" })
+      expect(described_class.models["gemini"]).to eq({ "gemini-test" => "gemini-test" })
+    end
+  end
+end
diff --git a/spec/rubyai/providers/anthropic_spec.rb b/spec/rubyai/providers/anthropic_spec.rb
new file mode 100644
index 0000000..4952905
--- /dev/null
+++ b/spec/rubyai/providers/anthropic_spec.rb
@@ -0,0 +1,88 @@
+require_relative "../../../lib/rubyai"
+
+require "spec_helper"
+
+RSpec.describe RubyAI::Providers::Anthropic do
+  subject(:anthropic) do
+    described_class.new(api: "test_api", messages: "Hello Claude", temperature: 0.5)
+  end
+  let(:instance) { described_class.new }
+
+  describe "#initialize" do
+    it "initializes instance variables correctly" do
+      expect(anthropic.api).to eq("test_api")
+      expect(anthropic.messages).to eq("Hello Claude")
+      expect(anthropic.temperature).to eq(0.5)
+    end
+
+    it "sets default model if not provided" do
+      expect(anthropic.instance_variable_get(:@model)).to eq("claude-2")
+    end
+  end
+
+  describe "#models" do
+    it "returns the list of Anthropic models" do
+      expect(instance.models).to include(
+        "claude-2" => "claude-2",
+        "claude-instant-100k" => "claude-instant-100k",
+        "claude-1.3-sonnet-100k" => "claude-1.3-sonnet-100k"
+      )
+    end
+  end
+
+  describe "#build_http_body" do
+    it "builds correct body with string message" do
+      result = instance.build_http_body("Hi!", "claude-2", 0.6)
+
+      expect(result["model"]).to eq("claude-2")
+      expect(result["max_tokens"]).to eq(1024)
+      expect(result["temperature"]).to eq(0.6)
+      expect(result["messages"]).to eq([{ "role" => "user", "content" => "Hi!" }])
+    end
+
+    it "builds correct body with array of messages" do
+      messages = [
+        { "role" => "user", "content" => "Hello" },
+        { "role" => "assistant", "content" => "Hi!" }
+      ]
+
+      result = instance.build_http_body(messages, "claude-2", 0.5)
+      expect(result["messages"]).to eq(messages)
+    end
+  end
+
+  describe "#build_http_headers" do
+    before do
+      allow(RubyAI).to receive_message_chain(:config, :anthropic, :api).and_return("real_api_key")
+    end
+
+    it "returns correct headers for Anthropic API" do
+      headers = instance.build_http_headers("anthropic")
+
+      expect(headers["x-api-key"]).to eq("real_api_key")
+      expect(headers["anthropic-version"]).to eq("2023-06-01")
+    end
+  end
+
+  describe "#format_messages_for_anthropic" do
+    it "wraps string in user role message" do
+      result = instance.send(:format_messages_for_anthropic, "Hello")
+      expect(result).to eq([{ "role" => "user", "content" => "Hello" }])
+    end
+
+    it "returns array unchanged if already properly formatted" do
+      messages = [{ "role" => "assistant", "content" => "Hi" }]
+      result = instance.send(:format_messages_for_anthropic, messages)
+      expect(result).to eq(messages)
+    end
+  end
+end
diff --git a/spec/rubyai/providers/gemini_spec.rb b/spec/rubyai/providers/gemini_spec.rb
new file mode 100644
index 0000000..c922da2
--- /dev/null
+++ b/spec/rubyai/providers/gemini_spec.rb
@@ -0,0 +1,47 @@
+require "spec_helper"
+require_relative "../../../lib/rubyai/providers/base"
+require_relative "../../../lib/rubyai/providers/gemini"
+
+RSpec.describe RubyAI::Providers::Gemini do
+  subject(:gemini) do
+    described_class.new(api: "fake_api", messages: "Hello", temperature: 0.9, max_tokens: 500)
+  end
+  let(:instance) { described_class.new }
+
+  describe "#initialize" do
+    it "sets instance variables correctly" do
+      expect(gemini.api).to eq("fake_api")
+      expect(gemini.messages).to eq("Hello")
+      expect(gemini.temperature).to eq(0.9)
+      expect(gemini.max_tokens).to eq(500)
+    end
+
+    it "sets default values if not provided" do
+      default_instance = described_class.new
+      expect(default_instance.temperature).to eq(0.7)
+      expect(default_instance.max_tokens).to eq(1000)
+    end
+  end
+
+  describe "#models" do
+    it "returns a hash of available Gemini models" do
+      expect(instance.models).to include(
+        "gemini-1.5-pro" => "gemini-1.5-pro",
+        "gemini-1.5-flash" => "gemini-1.5-flash",
+        "gemini-1.0-pro" => "gemini-1.0-pro"
+      )
+    end
+  end
+
+  describe "#build_http_body" do
+    let(:body) { instance.build_http_body("Hi", "gemini-1.5-pro", 0.6) }
+
+    it "builds the correct request body format" do
+      expect(body).to include(:contents, :generationConfig)
+      expect(body[:contents].first[:parts].first[:text]).to eq("Hi")
+      expect(body[:generationConfig][:temperature]).to eq(0.6)
+      expect(body[:generationConfig][:topP]).to eq(0.8)
+      expect(body[:generationConfig][:topK]).to eq(10)
+    end
+  end
+end
diff --git a/spec/rubyai/providers/openai_spec.rb b/spec/rubyai/providers/openai_spec.rb
new file mode 100644
index 0000000..5ad7b61
--- /dev/null
+++ b/spec/rubyai/providers/openai_spec.rb
@@ -0,0 +1,68 @@
+require_relative "../../../lib/rubyai"
+
+# spec/rubyai/providers/openai_spec.rb
+
+require "spec_helper"
+
+RSpec.describe RubyAI::Providers::OpenAI do
+  subject(:openai) do
+    described_class.new(api: "test_api", messages: "Hello OpenAI", temperature: 0.8)
+  end
+  let(:instance) { described_class.new }
+
+  describe "#initialize" do
+    it "initializes instance variables correctly" do
+      expect(openai.api).to eq("test_api")
+      expect(openai.messages).to eq("Hello OpenAI")
+      expect(openai.temperature).to eq(0.8)
+    end
+
+    it "uses default temperature if none is provided" do
+      expect(instance.temperature).to eq(0.7)
+    end
+  end
+
+  describe "#models" do
+    it "returns all available OpenAI models" do
+      expect(instance.models).to include(
+        "gpt-3.5-turbo" => "gpt-3.5-turbo",
+        "gpt-4" => "gpt-4",
+        "gpt-4-32k" => "gpt-4-32k",
+        "gpt-4-turbo" => "gpt-4-turbo",
+        "gpt-4o-mini" => "gpt-4o-mini",
+        "o1-mini" => "o1-mini",
+        "o1-preview" => "o1-preview",
+        "text-davinci-003" => "text-davinci-003"
+      )
+    end
+  end
+
+  describe "#build_http_body" do
+    it "builds a correct request body with given params" do
+      result = instance.build_http_body("Hi there", "gpt-3.5-turbo", 0.6)
+      expect(result[:model]).to eq("gpt-3.5-turbo")
+      expect(result[:messages]).to eq([{ role: "user", content: "Hi there" }])
+      expect(result[:temperature]).to eq(0.6)
+    end
+  end
+
+  describe "#build_http_headers" do
+    before do
+      allow(RubyAI).to receive_message_chain(:config, :openai, :api).and_return("fallback_token")
+    end
+
+    it "returns headers with Authorization using fallback token" do
+      headers = instance.build_http_headers("openai")
+      expect(headers[:Authorization]).to eq("Bearer fallback_token")
+    end
+  end
+end
diff --git a/spec/rubyai/rubyai_spec.rb b/spec/rubyai/rubyai_spec.rb
new file mode 100644
index 0000000..d751f92
--- /dev/null
+++ b/spec/rubyai/rubyai_spec.rb
@@ -0,0 +1,43 @@
+require_relative "../../lib/rubyai"
+
+require "spec_helper"
+
+RSpec.describe RubyAI do
+  describe ".models" do
+    it "returns a hash of provider models" do
+      models = described_class.models
+      expect(models).to be_a(Hash)
+      expect(models.keys).to include("openai", "anthropic", "gemini")
+      expect(models["openai"]).to be_a(Hash)
+    end
+  end
+
+  describe ".config" do
+    it "returns a Configuration instance" do
+      config = described_class.config
+      expect(config).to be_a(RubyAI::Configuration)
+    end
+
+    it "returns a memoized instance" do
+      expect(described_class.config.object_id).to eq(described_class.config.object_id)
+    end
+  end
+
+  describe ".configure" do
+    it "yields the configuration for modification" do
+      expect do
+        described_class.configure do |config|
+          config.openai.temperature = 0.99
+        end
+      end.not_to raise_error
+
+      expect(described_class.config.openai.temperature).to eq(0.99)
+    end
+  end
+
+  describe "RubyAI::Error" do
+    it "inherits from StandardError" do
+      expect(RubyAI::Error.new).to be_a(StandardError)
+    end
+  end
+end
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index c80d44b..4a323fa 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -44,55 +44,53 @@
   # triggering implicit auto-inclusion in groups with matching metadata.
   config.shared_context_metadata_behavior = :apply_to_host_groups
 
-# The settings below are suggested to provide a good initial experience
-# with RSpec, but feel free to customize to your heart's content.
-=begin
-  # This allows you to limit a spec run to individual examples or groups
-  # you care about by tagging them with `:focus` metadata. When nothing
-  # is tagged with `:focus`, all examples get run. RSpec also provides
-  # aliases for `it`, `describe`, and `context` that include `:focus`
-  # metadata: `fit`, `fdescribe` and `fcontext`, respectively.
-  config.filter_run_when_matching :focus
-
-  # Allows RSpec to persist some state between runs in order to support
-  # the `--only-failures` and `--next-failure` CLI options. We recommend
-  # you configure your source control system to ignore this file.
-  config.example_status_persistence_file_path = "spec/examples.txt"
-
-  # Limits the available syntax to the non-monkey patched syntax that is
-  # recommended. For more details, see:
-  # https://rspec.info/features/3-12/rspec-core/configuration/zero-monkey-patching-mode/
-  config.disable_monkey_patching!
-
-  # This setting enables warnings. It's recommended, but in some cases may
-  # be too noisy due to issues in dependencies.
-  config.warnings = true
-
-  # Many RSpec users commonly either run the entire suite or an individual
-  # file, and it's useful to allow more verbose output when running an
-  # individual spec file.
-  if config.files_to_run.one?
-    # Use the documentation formatter for detailed output,
-    # unless a formatter has already been configured
-    # (e.g. via a command-line flag).
-    config.default_formatter = "doc"
-  end
-
-  # Print the 10 slowest examples and example groups at the
-  # end of the spec run, to help surface which specs are running
-  # particularly slow.
-  config.profile_examples = 10
-
-  # Run specs in random order to surface order dependencies. If you find an
-  # order dependency and want to debug it, you can fix the order by providing
-  # the seed, which is printed after each run.
-  #     --seed 1234
-  config.order = :random
-
-  # Seed global randomization in this process using the `--seed` CLI option.
-  # Setting this allows you to use `--seed` to deterministically reproduce
-  # test failures related to randomization by passing the same `--seed` value
-  # as the one that triggered the failure.
-  Kernel.srand config.seed
-=end
+  # The settings below are suggested to provide a good initial experience
+  # with RSpec, but feel free to customize to your heart's content.
+  # # This allows you to limit a spec run to individual examples or groups
+  # # you care about by tagging them with `:focus` metadata. When nothing
+  # # is tagged with `:focus`, all examples get run. RSpec also provides
+  # # aliases for `it`, `describe`, and `context` that include `:focus`
+  # # metadata: `fit`, `fdescribe` and `fcontext`, respectively.
+  # config.filter_run_when_matching :focus
+  #
+  # # Allows RSpec to persist some state between runs in order to support
+  # # the `--only-failures` and `--next-failure` CLI options. We recommend
+  # # you configure your source control system to ignore this file.
+  # config.example_status_persistence_file_path = "spec/examples.txt"
+  #
+  # # Limits the available syntax to the non-monkey patched syntax that is
+  # # recommended. For more details, see:
+  # # https://rspec.info/features/3-12/rspec-core/configuration/zero-monkey-patching-mode/
+  # config.disable_monkey_patching!
+  #
+  # # This setting enables warnings. It's recommended, but in some cases may
+  # # be too noisy due to issues in dependencies.
+  # config.warnings = true
+  #
+  # # Many RSpec users commonly either run the entire suite or an individual
+  # # file, and it's useful to allow more verbose output when running an
+  # # individual spec file.
+  # if config.files_to_run.one?
+  #   # Use the documentation formatter for detailed output,
+  #   # unless a formatter has already been configured
+  #   # (e.g. via a command-line flag).
+  #   config.default_formatter = "doc"
+  # end
+  #
+  # # Print the 10 slowest examples and example groups at the
+  # # end of the spec run, to help surface which specs are running
+  # # particularly slow.
+  # config.profile_examples = 10
+  #
+  # # Run specs in random order to surface order dependencies. If you find an
+  # # order dependency and want to debug it, you can fix the order by providing
+  # # the seed, which is printed after each run.
+  # #     --seed 1234
+  # config.order = :random
+  #
+  # # Seed global randomization in this process using the `--seed` CLI option.
+  # # Setting this allows you to use `--seed` to deterministically reproduce
+  # # test failures related to randomization by passing the same `--seed` value
+  # # as the one that triggered the failure.
+  # Kernel.srand config.seed
 end