diff --git a/config/feature_flags/development/gitlab_duo.yml b/config/feature_flags/development/gitlab_duo.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5fce9acf12ff42ead6d24b9bc321c4f3d629674c
--- /dev/null
+++ b/config/feature_flags/development/gitlab_duo.yml
@@ -0,0 +1,8 @@
+---
+name: gitlab_duo
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/122235
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/413688
+milestone: '16.1'
+type: development
+group: group::ai-enablement
+default_enabled: false
diff --git a/doc/api/graphql/reference/index.md b/doc/api/graphql/reference/index.md
index cf3a5e3a541e9f4c3fc4b0f3974403de70a70030..83076cab73ad97de57e31a1396e336264ec96b00 100644
--- a/doc/api/graphql/reference/index.md
+++ b/doc/api/graphql/reference/index.md
@@ -1038,6 +1038,7 @@ Input type: `AiActionInput`
 
 | Name | Type | Description |
 | ---- | ---- | ----------- |
+| <a id="mutationaiactionchat"></a>`chat` | [`AiChatInput`](#aichatinput) | Input for chat AI action. |
 | <a id="mutationaiactionclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
 | <a id="mutationaiactionexplaincode"></a>`explainCode` | [`AiExplainCodeInput`](#aiexplaincodeinput) | Input for explain_code AI action. |
 | <a id="mutationaiactionexplainvulnerability"></a>`explainVulnerability` | [`AiExplainVulnerabilityInput`](#aiexplainvulnerabilityinput) | Input for explain_vulnerability AI action. |
@@ -27840,6 +27841,15 @@ be used as arguments).
 Only general use input types are listed here. For mutation input types,
 see the associated mutation type above.
 
+### `AiChatInput`
+
+#### Arguments
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| <a id="aichatinputcontent"></a>`content` | [`String!`](#string) | Content of the message. |
+| <a id="aichatinputresourceid"></a>`resourceId` | [`AiModelID!`](#aimodelid) | Global ID of the resource to mutate. |
+
 ### `AiExplainCodeInput`
 
 #### Arguments
diff --git a/ee/app/graphql/types/ai/chat_input_type.rb b/ee/app/graphql/types/ai/chat_input_type.rb
new file mode 100644
index 0000000000000000000000000000000000000000..221ec3db0b2130c2cc14eaaafeba7ce7a433c6d0
--- /dev/null
+++ b/ee/app/graphql/types/ai/chat_input_type.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module Types
+  module Ai
+    class ChatInputType < BaseMethodInputType
+      graphql_name 'AiChatInput'
+
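+      # resource_id is inherited from BaseMethodInputType (see the AiChatInput
+      # docs table), so only the chat-specific content argument is declared here.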
+      argument :content, GraphQL::Types::String,
+        required: true,
+        validates: { allow_blank: false },
+        description: 'Content of the message.'
+    end
+  end
+end
diff --git a/ee/app/models/gitlab_subscriptions/features.rb b/ee/app/models/gitlab_subscriptions/features.rb
index bd488f36860ecfb67da33d3d5d98c27c30a6da4e..00eb8ec7fdc8a1d1df9edf1a6a5716cfc3a30771 100644
--- a/ee/app/models/gitlab_subscriptions/features.rb
+++ b/ee/app/models/gitlab_subscriptions/features.rb
@@ -172,6 +172,7 @@ class Features
     ].freeze
 
     ULTIMATE_FEATURES = %i[
+      ai_chat
       ai_config_chat
       ai_features
       ai_git_command
diff --git a/ee/app/services/llm/chat_service.rb b/ee/app/services/llm/chat_service.rb
new file mode 100644
index 0000000000000000000000000000000000000000..60b8f38819f1cfef701dbc8ab54dfa9a9bf334bb
--- /dev/null
+++ b/ee/app/services/llm/chat_service.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+module Llm
+  class ChatService < BaseService
+    private
+
+    def perform
+      worker_perform(user, resource, :chat, options)
+    end
+
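+    # A chat request is only valid when the resource's parent namespace is
+    # licensed for ai_chat, passes the AI data-sharing stage check, and has
+    # the gitlab_duo feature flag enabled for the user.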
+    def valid?
+      super &&
+        resource.resource_parent.licensed_feature_available?(:ai_chat) &&
+        Gitlab::Llm::StageCheck.available?(resource.resource_parent.root_ancestor, :chat) &&
+        Feature.enabled?(:gitlab_duo, user)
+    end
+
+    # We need to broadcast this content over the websocket as well
+    # https://gitlab.com/gitlab-org/gitlab/-/issues/413600
+    def content(_action_name)
+      options[:content]
+    end
+  end
+end
diff --git a/ee/app/services/llm/execute_method_service.rb b/ee/app/services/llm/execute_method_service.rb
index 7d6d4aa18de26cdabe954e318795e2a5fedc82ea..d0d4107c01cacdedf9df62e4666e4f7a28d0b313 100644
--- a/ee/app/services/llm/execute_method_service.rb
+++ b/ee/app/services/llm/execute_method_service.rb
@@ -12,7 +12,8 @@ class ExecuteMethodService < BaseService
       tanuki_bot: Llm::TanukiBotService,
       generate_test_file: Llm::GenerateTestFileService,
       generate_description: Llm::GenerateDescriptionService,
-      generate_commit_message: Llm::GenerateCommitMessageService
+      generate_commit_message: Llm::GenerateCommitMessageService,
+      chat: Llm::ChatService
     }.freeze
 
     def initialize(user, resource, method, options = {})
diff --git a/ee/lib/gitlab/llm/chain/response_modifier.rb b/ee/lib/gitlab/llm/chain/response_modifier.rb
new file mode 100644
index 0000000000000000000000000000000000000000..5f1eaa090ae20439a21ed879f6c60212fb88e4ae
--- /dev/null
+++ b/ee/lib/gitlab/llm/chain/response_modifier.rb
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+module Gitlab
+  module Llm
+    module Chain
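+      # Adapts a Chain::Answer so it can be delivered through
+      # GraphqlSubscriptionResponseService like other AI responses.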
+      class ResponseModifier < Gitlab::Llm::BaseResponseModifier
+        def initialize(answer)
+          @ai_response = answer
+        end
+
+        def response_body
+          @response_body ||= ai_response.content
+        end
+
+        def errors
+          @errors ||= ai_response.status == :error ? [ai_response.content] : []
+        end
+      end
+    end
+  end
+end
diff --git a/ee/lib/gitlab/llm/completions/chat.rb b/ee/lib/gitlab/llm/completions/chat.rb
new file mode 100644
index 0000000000000000000000000000000000000000..f34362f414d9ab7a805a93a85666512f233b892b
--- /dev/null
+++ b/ee/lib/gitlab/llm/completions/chat.rb
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+module Gitlab
+  module Llm
+    module Completions
+      class Chat < Base
+        TOOLS = [Gitlab::Llm::Chain::Tools::IssueIdentifier].freeze
+
+        def execute(user, resource, options)
+          # The agent currently supports only Vertex AI, because it relies on VertexAi::Client-specific methods.
+          client = ::Gitlab::Llm::VertexAi::Client.new(user)
+          context = ::Gitlab::Llm::Chain::GitlabContext.new(
+            current_user: user,
+            container: resource.resource_parent.root_ancestor,
+            resource: resource,
+            ai_client: client
+          )
+
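+          # The zero-shot agent picks from the configured TOOLS to resolve the
+          # user input and returns a Chain::Answer.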
+          response = Gitlab::Llm::Chain::Agents::ZeroShot.new(
+            user_input: options[:content],
+            tools: TOOLS,
+            context: context
+          ).execute
+
+          response_modifier = Gitlab::Llm::Chain::ResponseModifier.new(response)
+
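+          # Broadcast the response to GraphQL subscribers for this request_id.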
+          ::Gitlab::Llm::GraphqlSubscriptionResponseService
+            .new(user, resource, response_modifier, options: { request_id: params[:request_id] })
+            .execute
+        end
+      end
+    end
+  end
+end
diff --git a/ee/lib/gitlab/llm/completions_factory.rb b/ee/lib/gitlab/llm/completions_factory.rb
index d4d202a47a90266d70e0269020b4f20e6f4a888a..e19bcdaa3c452dd5d69d899769a2041f8bc97f9c 100644
--- a/ee/lib/gitlab/llm/completions_factory.rb
+++ b/ee/lib/gitlab/llm/completions_factory.rb
@@ -35,6 +35,11 @@ class CompletionsFactory
         generate_commit_message: {
           service_class: ::Gitlab::Llm::VertexAi::Completions::GenerateCommitMessage,
           prompt_class: ::Gitlab::Llm::Templates::GenerateCommitMessage
+        },
+        chat: {
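+          # Chat needs no prompt template; prompts are built inside the chain agent.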
+          service_class: ::Gitlab::Llm::Completions::Chat,
+          prompt_class: nil
         }
       }.freeze
 
diff --git a/ee/lib/gitlab/llm/stage_check.rb b/ee/lib/gitlab/llm/stage_check.rb
index 1634000e1dbada6ef85b0b08e41fcb4738b9d321..5d4125895f8b10a4500c62dcca78345bffa37a95 100644
--- a/ee/lib/gitlab/llm/stage_check.rb
+++ b/ee/lib/gitlab/llm/stage_check.rb
@@ -11,7 +11,8 @@ class StageCheck
         :generate_test_file,
         :summarize_diff,
         :explain_vulnerability,
-        :generate_commit_message
+        :generate_commit_message,
+        :chat
       ].freeze
       BETA_FEATURES = [].freeze
       THIRD_PARTY_FEATURES = EXPERIMENTAL_FEATURES + BETA_FEATURES
diff --git a/ee/spec/lib/gitlab/llm/chain/response_modifier_spec.rb b/ee/spec/lib/gitlab/llm/chain/response_modifier_spec.rb
new file mode 100644
index 0000000000000000000000000000000000000000..499be999b3a8c944260306fb4c45f5a1799de5ea
--- /dev/null
+++ b/ee/spec/lib/gitlab/llm/chain/response_modifier_spec.rb
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Llm::Chain::ResponseModifier, feature_category: :shared do
+  let(:content) { "This is the summary" }
+  let(:context) { instance_double(Gitlab::Llm::Chain::GitlabContext) }
+  let(:status) { :ok }
+  let(:answer) do
+    ::Gitlab::Llm::Chain::Answer.new(
+      status: status, context: context, content: content, tool: nil, is_final: true
+    )
+  end
+
+  context 'on success' do
+    subject { described_class.new(answer).response_body }
+
+    it { is_expected.to eq content }
+  end
+
+  context 'on error' do
+    let(:status) { :error }
+
+    subject { described_class.new(answer).errors }
+
+    it { is_expected.to eq [content] }
+  end
+end
diff --git a/ee/spec/lib/gitlab/llm/completions/chat_spec.rb b/ee/spec/lib/gitlab/llm/completions/chat_spec.rb
new file mode 100644
index 0000000000000000000000000000000000000000..e967fd4260371c0564eeb379230d80e3d913449e
--- /dev/null
+++ b/ee/spec/lib/gitlab/llm/completions/chat_spec.rb
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Llm::Completions::Chat, feature_category: :shared do
+  let_it_be(:user) { create(:user) }
+  let_it_be(:group) { create(:group) }
+  let_it_be(:project) { create(:project, group: group) }
+  let_it_be(:resource) { create(:issue, project: project) }
+
+  let(:content) { 'Summarize issue' }
+  let(:ai_client) { instance_double(Gitlab::Llm::VertexAi::Client) }
+  let(:context) { instance_double(Gitlab::Llm::Chain::GitlabContext) }
+  let(:options) { { request_id: 'uuid', content: content } }
+  let(:answer) do
+    ::Gitlab::Llm::Chain::Answer.new(
+      status: :ok, context: context, content: content, tool: nil, is_final: true
+    )
+  end
+
+  subject { described_class.new(nil).execute(user, resource, options) }
+
+  describe '#execute' do
+    before do
+      allow(::Gitlab::Llm::VertexAi::Client).to receive(:new).and_return(ai_client)
+    end
+
+    it 'calls the ZeroShot Agent with the right parameters' do
+      expected_params = [
+        user_input: content,
+        tools: match_array([::Gitlab::Llm::Chain::Tools::IssueIdentifier]),
+        context: context
+      ]
+
+      expect_next_instance_of(::Gitlab::Llm::Chain::Agents::ZeroShot, *expected_params) do |instance|
+        expect(instance).to receive(:execute).and_return(answer)
+      end
+
+      expect(::Gitlab::Llm::Chain::GitlabContext).to receive(:new)
+        .with(current_user: user, container: group, resource: resource, ai_client: ai_client).and_return(context)
+
+      subject
+    end
+  end
+end
diff --git a/ee/spec/requests/api/graphql/mutations/projects/chat_spec.rb b/ee/spec/requests/api/graphql/mutations/projects/chat_spec.rb
new file mode 100644
index 0000000000000000000000000000000000000000..46d4f1f72cd96aba145e67e60b1c8b3763a99f5c
--- /dev/null
+++ b/ee/spec/requests/api/graphql/mutations/projects/chat_spec.rb
@@ -0,0 +1,78 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+RSpec.describe 'AiAction for chat', :saas, feature_category: :shared do
+  include GraphqlHelpers
+
+  let_it_be(:group) { create(:group_with_plan, :public, plan: :ultimate_plan) }
+  let_it_be(:project) { create(:project, :public, group: group) }
+  let_it_be(:current_user) { create(:user, developer_projects: [project]) }
+  let_it_be(:resource) { create(:issue, project: project) }
+
+  let(:mutation) do
+    params = { chat: { resource_id: resource.to_gid, content: "summarize" } }
+
+    graphql_mutation(:ai_action, params) do
+      <<-QL.strip_heredoc
+        errors
+      QL
+    end
+  end
+
+  before do
+    stub_ee_application_setting(should_check_namespace_plan: true)
+    stub_licensed_features(ai_chat: true, ai_features: true)
+    group.namespace_settings.update!(third_party_ai_features_enabled: true, experiment_features_enabled: true)
+  end
+
+  it 'successfully performs a chat request' do
+    expect(Llm::CompletionWorker).to receive(:perform_async).with(
+      current_user.id, resource.id, "Issue", :chat, {
+        content: "summarize", markup_format: :raw, request_id: an_instance_of(String)
+      }
+    )
+
+    post_graphql_mutation(mutation, current_user: current_user)
+
+    expect(graphql_mutation_response(:ai_action)['errors']).to eq([])
+  end
+
+  context 'when openai_experimentation feature flag is disabled' do
+    before do
+      stub_feature_flags(openai_experimentation: false)
+    end
+
+    it 'returns an error' do
+      expect(Llm::CompletionWorker).not_to receive(:perform_async)
+
+      post_graphql_mutation(mutation, current_user: current_user)
+
+      expect(fresh_response_data['errors'][0]['message']).to eq("`openai_experimentation` feature flag is disabled.")
+    end
+  end
+
+  context 'when third_party_ai_features_enabled is disabled' do
+    before do
+      group.namespace_settings.update!(third_party_ai_features_enabled: false)
+    end
+
+    it 'does not enqueue the completion worker' do
+      expect(Llm::CompletionWorker).not_to receive(:perform_async)
+
+      post_graphql_mutation(mutation, current_user: current_user)
+    end
+  end
+
+  context 'when experiment_features_enabled is disabled' do
+    before do
+      group.namespace_settings.update!(experiment_features_enabled: false)
+    end
+
+    it 'does not enqueue the completion worker' do
+      expect(Llm::CompletionWorker).not_to receive(:perform_async)
+
+      post_graphql_mutation(mutation, current_user: current_user)
+    end
+  end
+end
diff --git a/ee/spec/services/llm/chat_service_spec.rb b/ee/spec/services/llm/chat_service_spec.rb
new file mode 100644
index 0000000000000000000000000000000000000000..2821f319f43ab25a0f68889cce0620ece0345651
--- /dev/null
+++ b/ee/spec/services/llm/chat_service_spec.rb
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Llm::ChatService, :saas, feature_category: :shared do
+  let_it_be(:group) { create(:group_with_plan, plan: :ultimate_plan) }
+  let_it_be(:user) { create(:user) }
+  let_it_be(:project) { create(:project, group: group) }
+  let_it_be(:issue) { create(:issue, project: project) }
+
+  let(:stage_check_available) { true }
+
+  let_it_be(:options) do
+    {
+      content: "Summarize issue"
+    }
+  end
+
+  subject { described_class.new(user, issue, options) }
+
+  before do
+    stub_licensed_features(ai_chat: true)
+    stub_feature_flags(gitlab_duo: user)
+    project.add_guest(user)
+
+    allow(Gitlab::Llm::StageCheck).to receive(:available?).with(group, :chat).and_return(stage_check_available)
+  end
+
+  describe '#perform' do
+    it_behaves_like 'completion worker sync and async' do
+      let(:resource) { issue }
+      let(:action_name) { :chat }
+      let(:content) { 'Summarize issue' }
+    end
+
+    context 'when gitlab_duo feature flag is disabled' do
+      before do
+        stub_feature_flags(gitlab_duo: false)
+      end
+
+      it 'returns an error' do
+        expect(Llm::CompletionWorker).not_to receive(:perform_async)
+
+        expect(subject.execute).to be_error
+      end
+    end
+
+    context 'when ai_chat licensed feature is disabled' do
+      before do
+        stub_licensed_features(ai_chat: false)
+      end
+
+      it 'returns an error' do
+        expect(Llm::CompletionWorker).not_to receive(:perform_async)
+
+        expect(subject.execute).to be_error
+      end
+    end
+
+    it 'returns an error if user is not a member of the project' do
+      project.team.truncate
+
+      expect(Llm::CompletionWorker).not_to receive(:perform_async)
+
+      expect(subject.execute).to be_error
+    end
+
+    context 'when namespace is not allowed to send data' do
+      let(:stage_check_available) { false }
+
+      it 'returns an error' do
+        expect(Llm::CompletionWorker).not_to receive(:perform_async)
+        expect(subject.execute).to be_error
+      end
+    end
+  end
+end