Skip to content
代码片段 群组 项目
未验证 提交 a226985f 编辑于 作者: Patrick Bajao's avatar Patrick Bajao 提交者: GitLab
浏览文件

Merge branch 'aigw-completions-improvements' into 'master'

Improve adaptability of AIGW completions

See merge request https://gitlab.com/gitlab-org/gitlab/-/merge_requests/167835



Merged-by: Patrick Bajao <ebajao@gitlab.com>
Approved-by: Nathan Weinshenker <nweinshenker@gitlab.com>
Approved-by: Patrick Bajao <ebajao@gitlab.com>
Reviewed-by: Alejandro Rodríguez <alejandro@gitlab.com>
Reviewed-by: Patrick Bajao <ebajao@gitlab.com>
Reviewed-by: Nathan Weinshenker <nweinshenker@gitlab.com>
Reviewed-by: Duo Code Reviewer <duo-code-review-bot@gitlab.com>
Co-authored-by: Alejandro Rodríguez <alejandro@gitlab.com>
No related branches found
No related tags found
无相关合并请求
......@@ -5,28 +5,53 @@ module Llm
module AiGateway
module Completions
class Base < Llm::Completions::Base
# User-facing fallback message for unexpected (tracked) failures.
DEFAULT_ERROR = 'An unexpected error has occurred.'
# Subclasses may override this constant to wrap the response in a custom
# response modifier class (see `execute`).
RESPONSE_MODIFIER = ResponseModifiers::Base
# Runs the completion: validates, requests the AI Gateway, wraps the
# (post-processed) response in the class's RESPONSE_MODIFIER, and publishes
# the result to the client via the GraphQL subscription response service.
#
# Returns nil without issuing a request when `valid?` is false.
def execute
  return unless valid?

  response = request!
  # Subclasses can customize RESPONSE_MODIFIER and/or post_process to adapt
  # the response data before it reaches the subscription service.
  response_modifier = self.class::RESPONSE_MODIFIER.new(post_process(response))

  ::Gitlab::Llm::GraphqlSubscriptionResponseService.new(
    user, resource, response_modifier, options: response_options
  ).execute
end
# Abstract: subclasses must implement this method, returning a Hash with all
# the inputs required by the AI Gateway prompt for this action.
# An `ArgumentError` may be raised to signal an error extracting data from
# `prompt_message`; `request!` relays that message to the user.
def inputs
raise NotImplementedError
end
private
# Hook: can be overwritten by child classes to perform additional
# validations; returning false aborts `execute` before any request is made.
def valid?
true
end
# Hook: can be used by subclasses to perform additional steps or
# transformations on the response data before it is handed to the response
# modifier. Defaults to the identity transformation.
def post_process(response)
response
end
# Issues the completion request to the AI Gateway prompt endpoint and
# returns the parsed JSON body.
#
# Error mapping (always returns data the response modifier understands):
# - ArgumentError (e.g. from `inputs`) => { 'detail' => <message> },
#   relayed verbatim to the user
# - any other StandardError => tracked via ErrorTracking, generic
#   DEFAULT_ERROR returned as the detail
def request!
  ai_client = ::Gitlab::Llm::AiGateway::Client.new(user, service_name: prompt_message.ai_action.to_sym,
    tracking_context: tracking_context)

  response = ai_client.complete(
    url: "#{::Gitlab::AiGateway.url}/v1/prompts/#{prompt_message.ai_action}",
    body: { 'inputs' => inputs }
  )

  Gitlab::Json.parse(response.body)
rescue ArgumentError => e
  { 'detail' => e.message }
rescue StandardError => e
  Gitlab::ErrorTracking.track_exception(e, ai_action: prompt_message.ai_action)
  { 'detail' => DEFAULT_ERROR }
end
end
end
......
......@@ -5,23 +5,29 @@ module Llm
module AiGateway
module ResponseModifiers
# Default response modifier: receives already-parsed response data — a plain
# String on success, or a Hash carrying a 'detail' key on error.
class Base < Gitlab::Llm::BaseResponseModifier
extend ::Gitlab::Utils::Override
# ai_response - already-parsed response data (String on success, Hash on
# error); parsing the raw HTTP body is now the completion class's job.
def initialize(ai_response)
  @ai_response = ai_response
end
override :response_body
# The response data is stored pre-parsed, so it is returned as-is.
def response_body
ai_response
end
override :errors
def errors
  # On success the response is a plain String; only Hash responses carry
  # error details.
  @errors ||= ai_response.is_a?(String) ? [] : error_from_response
end

private

# Extracts error messages from an error-response Hash. The 'detail' value is
# either a String or an Array of { 'msg' => ... } entries (validation-style
# errors); returns an Array with the message, or [] when none is present.
def error_from_response
  detail = ai_response['detail']
  [detail.is_a?(String) ? detail : detail&.dig(0, 'msg')].compact
end
end
end
......
......@@ -3,13 +3,15 @@
require 'spec_helper'
RSpec.describe Gitlab::Llm::AiGateway::Completions::Base, feature_category: :ai_abstraction_layer do
# NOTE(review): the next line and the `instance_double` `response` let below
# appear to be removed-side diff lines (superseded by the `subclass` let and
# the `http_response` let further down) — confirm against the applied revision.
let(:subclass) { Class.new(described_class) }
let(:user) { build(:user) }
let(:resource) { build(:issue) }
let(:ai_action) { 'test_action' }
let(:prompt_message) { build(:ai_message, ai_action: ai_action, user: user, resource: resource) }
let(:inputs) { { prompt: "What's your name?" } }
let(:response) { instance_double(HTTParty::Response, body: "I'm Duo!") }
let(:response) { "I'm Duo" }
let(:http_response) { instance_double(HTTParty::Response, body: %("#{response}")) }
# NOTE(review): `processed_repsonse` is a typo of `processed_response`; it is
# used consistently across this spec, so a rename must touch every occurrence.
let(:processed_repsonse) { response }
let(:response_modifier_class) { Gitlab::Llm::AiGateway::ResponseModifiers::Base }
let(:response_modifier) { instance_double(Gitlab::Llm::AiGateway::ResponseModifiers::Base) }
let(:response_service) { instance_double(Gitlab::Llm::GraphqlSubscriptionResponseService) }
let(:tracking_context) { { action: ai_action, request_id: prompt_message.request_id } }
......@@ -18,35 +20,107 @@
prompt_message.to_h.slice(:request_id, :client_subscription_id, :ai_action, :agent_version_id)
end
# Anonymous subclass implementing the abstract `inputs` hook so the base
# completion can be exercised end to end.
let(:subclass) do
prompt_inputs = inputs
Class.new(described_class) do
define_method :inputs do
prompt_inputs
end
end
end
subject(:completion) { subclass.new(prompt_message, nil) }
# On the bare base class, `inputs` is abstract and must raise.
describe 'required methods' do
let(:subclass) { Class.new(described_class) }
it { expect { completion.inputs }.to raise_error(NotImplementedError) }
end
describe '#execute' do
# NOTE(review): this before block still mixes both sides of the diff — the
# client/modifier/service stubs and the trailing `it` example were moved
# into the 'executing successfully' shared example; only the
# `Gitlab::Llm::AiGateway::Client.new` stub looks like it remains on the
# added side. Confirm against the applied revision.
before do
allow(completion).to receive(:inputs).and_return(inputs)
allow(Gitlab::Llm::AiGateway::Client).to receive(:new)
.with(user, service_name: ai_action.to_sym, tracking_context: tracking_context).and_return(client)
allow(client).to receive(:complete).with(url: "#{Gitlab::AiGateway.url}/v1/prompts/#{ai_action}",
body: { 'inputs' => inputs })
.and_return(response)
allow(Gitlab::Llm::AiGateway::ResponseModifiers::Base).to receive(:new).with(response)
.and_return(response_modifier)
allow(Gitlab::Llm::GraphqlSubscriptionResponseService).to receive(:new)
.with(user, resource, response_modifier, options: response_options).and_return(response_service)
end
let(:result) { { status: :success } }
subject(:execute) { completion.execute }
it 'executes the response service and returns its result' do
expect(response_service).to receive(:execute).and_return(result)
# Shared assertion: the completion requests the gateway (when `http_response`
# is set), wraps `processed_repsonse` in the configured modifier class, and
# returns the response service's result.
# NOTE(review): `processed_repsonse` is a spec-wide typo of
# `processed_response`; renaming it must touch every `let` that defines it.
shared_examples 'executing successfully' do
it 'executes the response service and returns its result' do
if http_response
expect(client).to receive(:complete).with(url: "#{Gitlab::AiGateway.url}/v1/prompts/#{ai_action}",
body: { 'inputs' => inputs })
.and_return(http_response)
end
expect(response_modifier_class).to receive(:new).with(processed_repsonse)
.and_return(response_modifier)
expect(Gitlab::Llm::GraphqlSubscriptionResponseService).to receive(:new)
.with(user, resource, response_modifier, options: response_options).and_return(response_service)
expect(response_service).to receive(:execute).and_return(result)
is_expected.to be(result)
end
end
it_behaves_like 'executing successfully'
# A falsy `valid?` must short-circuit `execute` before any gateway request.
context 'when the completion is not valid' do
before do
subclass.define_method(:valid?) { false }
end
it 'returns nil without making a request' do
expect(client).not_to receive(:complete)
expect(execute).to be_nil
end
end
context 'when the subclass raises an ArgumentError when gathering inputs' do
# No HTTP request is made; the error itself becomes the "response" data.
let(:http_response) { nil }
let(:processed_repsonse) { { 'detail' => 'Something went wrong.' } }
before do
subclass.define_method(:inputs) { raise ArgumentError, 'Something went wrong.' }
end
# The completion still "executes successfully" in that it relays the error
# message to the user via GraphQL, which the shared example checks through
# the `let(:processed_repsonse)` override in this context.
it_behaves_like 'executing successfully'
end
# Unexpected StandardErrors are tracked and converted to the generic
# DEFAULT_ERROR detail rather than crashing the completion.
context 'when an unexpected error is raised' do
let(:processed_repsonse) { { 'detail' => 'An unexpected error has occurred.' } }
before do
allow(Gitlab::Json).to receive(:parse).and_raise(StandardError)
end
it_behaves_like 'executing successfully'
end
# `post_process` transforms the response before it reaches the modifier.
context 'when the subclass overrides the post_process method' do
let(:processed_repsonse) { response.upcase }
before do
subclass.define_method(:post_process) { |response| response.upcase }
end
it_behaves_like 'executing successfully'
end
# Subclasses can swap in their own modifier via the RESPONSE_MODIFIER
# constant; the shared example then expects the custom class to be used.
context 'when the subclass overrides the response modifier' do
  let(:response_modifier_class) { Class.new }

  before do
    subclass.const_set(:RESPONSE_MODIFIER, response_modifier_class)
  end

  it_behaves_like 'executing successfully'
end
end
end
......@@ -3,14 +3,12 @@
require 'spec_helper'
RSpec.describe Gitlab::Llm::AiGateway::ResponseModifiers::Base, feature_category: :ai_abstraction_layer do
# NOTE(review): the first three lets below mix both sides of the diff — the
# `response`/`response_body`/`instance_double` lines are removed-side; only
# the plain-string `ai_response` let applies post-merge.
let(:response) { "I'm GitLab Duo!" }
let(:response_body) { %("#{response}") }
let(:ai_response) { instance_double(HTTParty::Response, body: response_body) }
let(:ai_response) { %("I'm GitLab Duo") }
let(:base_modifier) { described_class.new(ai_response) }
describe '#response_body' do
# NOTE(review): two `it` openers with a single `end` — a diff-view artifact;
# the added example is 'returns the response body' (data is pre-parsed).
it 'returns the parsed response body' do
expect(base_modifier.response_body).to eq(response)
it 'returns the response body' do
expect(base_modifier.response_body).to eq(ai_response)
end
end
......@@ -25,7 +23,7 @@
let(:error) { 'Error message' }
context 'when the detail is an string' do
let(:response_body) { %({"detail": "#{error}"}) }
let(:ai_response) { { 'detail' => error } }
it 'returns an array with the error message' do
expect(base_modifier.errors).to eq([error])
......@@ -33,7 +31,7 @@
end
context 'when the detail is an array' do
let(:response_body) { %({"detail": [{"msg": "#{error}"}]}) }
let(:ai_response) { { 'detail' => [{ 'msg' => error }] } }
it 'returns an array with the error message' do
expect(base_modifier.errors).to eq([error])
......
0% 加载中 .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册