diff --git a/ee/lib/gitlab/llm/tanuki_bot.rb b/ee/lib/gitlab/llm/tanuki_bot.rb
index edc66b3a96ce9dc93573e219f092416e65f24678..ff09ffffcd082732e63307553f01eb26856759c3 100644
--- a/ee/lib/gitlab/llm/tanuki_bot.rb
+++ b/ee/lib/gitlab/llm/tanuki_bot.rb
@@ -60,8 +60,15 @@ def execute(&block)
 
       # Note: a Rake task is using this method to extract embeddings for a test fixture.
       def embedding_for_question(question)
-        embeddings_result = vertex_client.text_embeddings(content: question)
-        embeddings_result['predictions'].first['embeddings']['values']
+        result = vertex_client.text_embeddings(content: question)
+
+        if !result.success? || !result.has_key?('predictions')
+          logger.info_or_debug(current_user, message: "Could not generate embeddings",
+            error: result.dig('error', 'message'))
+          nil
+        else
+          result['predictions'].first&.dig('embeddings', 'values')
+        end
       end
 
       # Note: a Rake task is using this method to extract embeddings for a test fixture.
diff --git a/ee/spec/lib/gitlab/llm/tanuki_bot_spec.rb b/ee/spec/lib/gitlab/llm/tanuki_bot_spec.rb
index 43f10db8c800c0b68476e87669bf55917f374da7..50da1eeb76a808fd78f374bbdc1ac02746326e43 100644
--- a/ee/spec/lib/gitlab/llm/tanuki_bot_spec.rb
+++ b/ee/spec/lib/gitlab/llm/tanuki_bot_spec.rb
@@ -271,6 +271,7 @@
 
         context 'when user has AI features enabled' do
           before do
+            allow(vertex_response).to receive(:success?).and_return(true)
             allow(::Gitlab::Llm::VertexAi::Client).to receive(:new).and_return(vertex_client)
             allow(::Gitlab::Llm::Anthropic::Client).to receive(:new).and_return(anthropic_client)
             allow(described_class).to receive(:enabled_for?).and_return(true)
@@ -310,8 +311,8 @@
             embeddings
 
             allow(anthropic_client).to receive(:stream).once
-                                         .and_yield({ "completion" => answer })
-                                         .and_return(completion_response)
+                                          .and_yield({ "completion" => answer })
+                                          .and_return(completion_response)
 
             expect(vertex_client).to receive(:text_embeddings).with(**vertex_args).and_return(vertex_response)
 
@@ -337,6 +338,52 @@
               expect(execute.response_body).to eq(unsupported_response_message)
             end
           end
+
+          context 'when searching for embeddings' do
+            let(:vertex_error_response) { { "error" => { "message" => "some error" } } }
+
+            before do
+              allow(vertex_error_response).to receive(:success?).and_return(true)
+              allow(vertex_client).to receive(:text_embeddings).with(**vertex_args).and_return(vertex_error_response)
+            end
+
+            context 'when the embeddings request is unsuccessful' do
+              before do
+                allow(vertex_error_response).to receive(:success?).and_return(false)
+              end
+
+              it 'logs an error message' do
+                expect(logger).to receive(:info_or_debug).with(user, message: "Could not generate embeddings",
+                  error: "some error")
+                expect(execute.response_body).to eq(empty_response_message)
+                execute
+              end
+            end
+
+            context 'when the embeddings request has no predictions' do
+              let(:empty) { { "predictions" => [] } }
+
+              before do
+                allow(empty).to receive(:success?).and_return(true)
+                allow(vertex_client).to receive(:text_embeddings).with(**vertex_args).and_return(empty)
+              end
+
+              it 'returns empty response' do
+                expect(execute.response_body).to eq(empty_response_message)
+                execute
+              end
+            end
+          end
+        end
+
+        context 'when ai_global_switch FF is disabled' do
+          before do
+            stub_feature_flags(ai_global_switch: false)
+          end
+
+          it 'returns an empty response message' do
+            expect(execute.response_body).to eq(empty_response_message)
+          end
         end
       end