diff --git a/doc/administration/self_hosted_models/install_infrastructure.md b/doc/administration/self_hosted_models/install_infrastructure.md
index 15f6c4481ee58ef903886c4d016bfb18a3f23722..31e08d903e884442fb6ca8792fbcc2352cdb3bc0 100644
--- a/doc/administration/self_hosted_models/install_infrastructure.md
+++ b/doc/administration/self_hosted_models/install_infrastructure.md
@@ -62,52 +62,6 @@ installed LLM:
 - [TensorRT-LLM](https://docs.mistral.ai/deployment/self-deployment/overview/)
 - [Ollama and litellm](litellm_proxy_setup.md)
 
-#### Litellm config examples for quickly getting started with Ollama
-
-```yaml
-model_list:
-  - model_name: mistral
-    litellm_params:
-      model: ollama/mistral:latest
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: mixtral
-    litellm_params:
-      model: ollama/mixtral:latest
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: codegemma
-    litellm_params:
-      model: ollama/codegemma
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: codestral
-    litellm_params:
-      model: ollama/codestral
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: codellama
-    litellm_params:
-      model: ollama/codellama:13b
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: codellama_13b_code
-    litellm_params:
-      model: ollama/codellama:code
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: deepseekcoder
-    litellm_params:
-      model: ollama/deepseekcoder
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: mixtral_8x22b
-    litellm_params:
-      model: ollama/mixtral:8x22b
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: codegemma_2b
-    litellm_params:
-      model: ollama/codegemma:2b
-      api_base: YOUR_HOSTING_SERVER
-  - model_name: codegemma_7b
-    litellm_params:
-      model: ollama/codegemma:code
-      api_base: YOUR_HOSTING_SERVER
-```
-
 ## Configure your GitLab instance
 
 1. For the GitLab instance to know where the AI Gateway is located so it can access
diff --git a/doc/api/graphql/reference/index.md b/doc/api/graphql/reference/index.md
index 3744a6535b5a399426d4ed9145c9d05172a90c50..0d17a417696fbe8a43c391da959f1c6f322adad3 100644
--- a/doc/api/graphql/reference/index.md
+++ b/doc/api/graphql/reference/index.md
@@ -35855,23 +35855,12 @@ LLMs supported by the self-hosted model features.
 
 | Value | Description |
 | ----- | ----------- |
-| <a id="aiacceptedselfhostedmodelscodegemma"></a>`CODEGEMMA` | CodeGemma 7b-it: Suitable for code generation. |
-| <a id="aiacceptedselfhostedmodelscodegemma_2b"></a>`CODEGEMMA_2B` | CodeGemma 2b: Suitable for code completion. |
-| <a id="aiacceptedselfhostedmodelscodegemma_7b"></a>`CODEGEMMA_7B` | CodeGemma 7b: Suitable for code completion. |
-| <a id="aiacceptedselfhostedmodelscodellama"></a>`CODELLAMA` | Code-Llama 13b: Suitable for code generation. |
-| <a id="aiacceptedselfhostedmodelscodellama_13b_code"></a>`CODELLAMA_13B_CODE` | Code-Llama 13b-code: Suitable for code completion. |
-| <a id="aiacceptedselfhostedmodelscodestral"></a>`CODESTRAL` | Codestral 22B: Suitable for code completion and code generation. |
-| <a id="aiacceptedselfhostedmodelsdeepseekcoder"></a>`DEEPSEEKCODER` | Deepseek Coder 1.3b, 6.7b and 33b base or instruct. |
-| <a id="aiacceptedselfhostedmodelsllama3"></a>`LLAMA3` | LLaMA 3 - 8B: Suitable for code generation and completion. |
-| <a id="aiacceptedselfhostedmodelsllama3_70b"></a>`LLAMA3_70B` | LLaMA 3 - 70B: Suitable for code generation and completion. |
-| <a id="aiacceptedselfhostedmodelsllama3_70b_text"></a>`LLAMA3_70B_TEXT` | LLaMA 3 Text - 70B Text: Suitable for code generation and completion. |
-| <a id="aiacceptedselfhostedmodelsllama3_text"></a>`LLAMA3_TEXT` | LLaMA 3 Text - 8B: Suitable for code generation and completion. |
-| <a id="aiacceptedselfhostedmodelsmistral"></a>`MISTRAL` | Mistral 7B: Suitable for code generation and duo chat. |
-| <a id="aiacceptedselfhostedmodelsmistral_text"></a>`MISTRAL_TEXT` | Mistral-7B Text: Suitable for code completion. |
-| <a id="aiacceptedselfhostedmodelsmixtral"></a>`MIXTRAL` | Mixtral 8x7B: Suitable for code generation and duo chat. |
-| <a id="aiacceptedselfhostedmodelsmixtral_8x22b"></a>`MIXTRAL_8X22B` | Mixtral 8x22B: Suitable for code generation and duo chat. |
-| <a id="aiacceptedselfhostedmodelsmixtral_8x22b_text"></a>`MIXTRAL_8X22B_TEXT` | Mixtral-8x22B Text: Suitable for code completion. |
-| <a id="aiacceptedselfhostedmodelsmixtral_text"></a>`MIXTRAL_TEXT` | Mixtral-8x7B Text: Suitable for code completion. |
+| <a id="aiacceptedselfhostedmodelscodegemma"></a>`CODEGEMMA` | CodeGemma Code: Suitable for code suggestions. |
+| <a id="aiacceptedselfhostedmodelscodellama"></a>`CODELLAMA` | Code-Llama Instruct: Suitable for code suggestions. |
+| <a id="aiacceptedselfhostedmodelscodestral"></a>`CODESTRAL` | Codestral: Suitable for code suggestions. |
+| <a id="aiacceptedselfhostedmodelsdeepseekcoder"></a>`DEEPSEEKCODER` | Deepseek Coder base or instruct. |
+| <a id="aiacceptedselfhostedmodelsllama3"></a>`LLAMA3` | LLaMA 3: Suitable for code suggestions and duo chat. |
+| <a id="aiacceptedselfhostedmodelsmistral"></a>`MISTRAL` | Mistral: Suitable for code suggestions and duo chat. |
 
 ### `AiAction`
 
diff --git a/ee/app/graphql/types/ai/self_hosted_models/accepted_models_enum.rb b/ee/app/graphql/types/ai/self_hosted_models/accepted_models_enum.rb
index f7df39932a22caf6fba9fb7920a88bbae3d95df6..074df3ef091451d9d3a3f764057de23ff663b6a4 100644
--- a/ee/app/graphql/types/ai/self_hosted_models/accepted_models_enum.rb
+++ b/ee/app/graphql/types/ai/self_hosted_models/accepted_models_enum.rb
@@ -7,32 +7,12 @@ class AcceptedModelsEnum < BaseEnum
         graphql_name 'AiAcceptedSelfHostedModels'
         description 'LLMs supported by the self-hosted model features.'
 
-        value 'CODEGEMMA_2B', 'CodeGemma 2b: Suitable for code completion.', value: 'codegemma_2b'
-        value 'CODEGEMMA', 'CodeGemma 7b-it: Suitable for code generation.', value: 'codegemma'
-        value 'CODEGEMMA_7B', 'CodeGemma 7b: Suitable for code completion.', value: 'codegemma_7b'
-        value 'CODELLAMA_13B_CODE', 'Code-Llama 13b-code: Suitable for code completion.', value: 'codellama_13b_code'
-        value 'CODELLAMA', 'Code-Llama 13b: Suitable for code generation.', value: 'codellama'
-        value 'CODESTRAL', 'Codestral 22B: Suitable for code completion and code generation.',
-          value: 'codestral'
-        value 'MISTRAL', 'Mistral 7B: Suitable for code generation and duo chat.', value: 'mistral'
-        value 'MIXTRAL_8X22B', 'Mixtral 8x22B: Suitable for code generation and duo chat.', value: 'mixtral_8x22b'
-        value 'MIXTRAL', 'Mixtral 8x7B: Suitable for code generation and duo chat.', value: 'mixtral'
-        value 'DEEPSEEKCODER', description: 'Deepseek Coder 1.3b, 6.7b and 33b base or instruct.',
-          value: 'deepseekcoder'
-        value 'MISTRAL_TEXT', description: 'Mistral-7B Text: Suitable for code completion.',
-          value: 'mistral_text'
-        value 'MIXTRAL_TEXT', description: 'Mixtral-8x7B Text: Suitable for code completion.',
-          value: 'mixtral_text'
-        value 'MIXTRAL_8X22B_TEXT', description: 'Mixtral-8x22B Text: Suitable for code completion.',
-          value: 'mixtral_8x22b_text'
-        value 'LLAMA3', description: 'LLaMA 3 - 8B: Suitable for code generation and completion.',
-          value: 'llama3'
-        value 'LLAMA3_TEXT', description: 'LLaMA 3 Text - 8B: Suitable for code generation and completion.',
-          value: 'llama3_text'
-        value 'LLAMA3_70B', description: 'LLaMA 3 - 70B: Suitable for code generation and completion.',
-          value: 'llama3_70b'
-        value 'LLAMA3_70B_TEXT', description: 'LLaMA 3 Text - 70B Text: Suitable for code generation and completion.',
-          value: 'llama3_70b_text'
+        value 'CODEGEMMA', 'CodeGemma Code: Suitable for code suggestions.', value: 'codegemma'
+        value 'CODELLAMA', 'Code-Llama Instruct: Suitable for code suggestions.', value: 'codellama'
+        value 'CODESTRAL', 'Codestral: Suitable for code suggestions.', value: 'codestral'
+        value 'MISTRAL', 'Mistral: Suitable for code suggestions and Duo Chat.', value: 'mistral'
+        value 'DEEPSEEKCODER', description: 'Deepseek Coder base or instruct.', value: 'deepseekcoder'
+        value 'LLAMA3', description: 'LLaMA 3: Suitable for code suggestions and Duo Chat.', value: 'llama3'
       end
     end
   end
diff --git a/ee/app/models/ai/self_hosted_model.rb b/ee/app/models/ai/self_hosted_model.rb
index f322fc8a6cf1f5e17613b135b6fb92e8b1baa6e4..4047a99f2e5e494b00091b88423fec8394ccb01c 100644
--- a/ee/app/models/ai/self_hosted_model.rb
+++ b/ee/app/models/ai/self_hosted_model.rb
@@ -19,22 +19,11 @@ class SelfHostedModel < ApplicationRecord
 
     enum model: {
       mistral: 0,
-      mixtral: 1,
+      llama3: 1, # reuses the persisted enum integer previously assigned to mixtral
       codegemma: 2,
       codestral: 3,
       codellama: 4,
-      codellama_13b_code: 5,
-      deepseekcoder: 6,
-      mixtral_8x22b: 7,
-      codegemma_2b: 8,
-      codegemma_7b: 9,
-      mistral_text: 10,
-      mixtral_text: 11,
-      mixtral_8x22b_text: 12,
-      llama3: 13,
-      llama3_text: 14,
-      llama3_70b: 15,
-      llama3_70b_text: 16
+      deepseekcoder: 5 # reuses the persisted enum integer previously assigned to codellama_13b_code
     }
 
     # For now, only OpenAI API format is supported, this method will be potentially
diff --git a/ee/app/views/admin/ai/self_hosted_models/_form.html.haml b/ee/app/views/admin/ai/self_hosted_models/_form.html.haml
index 4ff46268bdd9c3a5292bbb926904f308b46854b9..280b0a9cf11b5c59ac9ae8cd01f5783ef1b1f449 100644
--- a/ee/app/views/admin/ai/self_hosted_models/_form.html.haml
+++ b/ee/app/views/admin/ai/self_hosted_models/_form.html.haml
@@ -6,7 +6,7 @@
 
   .form-group
     = f.label :name, s_('AdminSelfHostedModels|Name the deployment (must be unique)')
-    = f.text_field :name, class: "gl-form-input form-control js-quick-submit", data: { testid: 'self-hosted-model-name-field' }, placeholder: "Mixtral-vllm-deployment"
+    = f.text_field :name, class: "gl-form-input form-control js-quick-submit", data: { testid: 'self-hosted-model-name-field' }, placeholder: "Mistral-vllm-deployment"
 
   .form-group
     = f.label :model
diff --git a/ee/lib/gitlab/ai/feature_settings/feature_metadata.yml b/ee/lib/gitlab/ai/feature_settings/feature_metadata.yml
index aca359f4daad8151e9c97d65e30c21fbb2dc592b..1fa8ceda2b377b0faa6af44a3271bb4fd7258fc3 100644
--- a/ee/lib/gitlab/ai/feature_settings/feature_metadata.yml
+++ b/ee/lib/gitlab/ai/feature_settings/feature_metadata.yml
@@ -8,37 +8,21 @@ code_generations:
     - codestral
     - deepseekcoder
     - mistral
-    - mixtral
-    - mixtral_8x22b
     - llama3
-    - llama3_text
-    - llama3_70b
-    - llama3_70b_text
 code_completions:
   title: Code Completion
   release_state: GA
   main_feature: Code Suggestions
   compatible_llms:
-    - codegemma_2b
-    - codegemma_7b
-    - codellama_13b_code
+    - codegemma
+    - codellama
     - codestral
     - deepseekcoder
     - mistral
-    - mixtral
-    - mixtral_8x22b
-    - mistral_text
-    - mixtral_text
-    - mixtral_8x22b_text
     - llama3
-    - llama3_text
-    - llama3_70b
-    - llama3_70b_text
 duo_chat:
   title: Duo Chat
   release_state: BETA
   main_feature: Duo Chat
   compatible_llms:
     - mistral
-    - mixtral_8x22b
-    - mixtral
diff --git a/ee/spec/frontend/pages/admin/ai/feature_settings/mock_data.js b/ee/spec/frontend/pages/admin/ai/feature_settings/mock_data.js
index cedba1bbda8238cd638c6022463cd4e7153dc30b..eae2a2d9f597b2ffe9d9e75cb5c9b103440582c6 100644
--- a/ee/spec/frontend/pages/admin/ai/feature_settings/mock_data.js
+++ b/ee/spec/frontend/pages/admin/ai/feature_settings/mock_data.js
@@ -24,6 +24,6 @@ export const mockAiFeatureSettings = [
 
 export const mockSelfHostedModels = [
   { id: 1, name: 'Model 1', model: 'mistral' },
-  { id: 2, name: 'Model 2', model: 'mixtral' },
+  { id: 2, name: 'Model 2', model: 'codellama' },
   { id: 3, name: 'Model 3', model: 'codegemma' },
 ];
diff --git a/ee/spec/frontend/pages/admin/ai/feature_settings/model_select_dropdown_spec.js b/ee/spec/frontend/pages/admin/ai/feature_settings/model_select_dropdown_spec.js
index 8872ec559b272d9e6da1c919036d6630a878d727..55213eb9781cc59f0a0e81e3815cecd16d177218 100644
--- a/ee/spec/frontend/pages/admin/ai/feature_settings/model_select_dropdown_spec.js
+++ b/ee/spec/frontend/pages/admin/ai/feature_settings/model_select_dropdown_spec.js
@@ -67,7 +67,7 @@ describe('ModelSelectDropdown', () => {
 
     expect(modelOptions.map((model) => model.text)).toEqual([
       'Model 1 (mistral)',
-      'Model 2 (mixtral)',
+      'Model 2 (codellama)',
       'Model 3 (codegemma)',
       'Disabled',
     ]);
diff --git a/ee/spec/frontend/pages/admin/ai/self_hosted_models/mock_data.js b/ee/spec/frontend/pages/admin/ai/self_hosted_models/mock_data.js
index 1820e4b6e31c59ae53a80fe7a950ef3f2a2807be..4488e57033d6b2fb0e4044f288b12beae8a7bcab 100644
--- a/ee/spec/frontend/pages/admin/ai/self_hosted_models/mock_data.js
+++ b/ee/spec/frontend/pages/admin/ai/self_hosted_models/mock_data.js
@@ -1,7 +1,7 @@
 export const mockSelfHostedModel = {
   id: 'gid://gitlab/Ai::SelfHostedModel/1',
   name: 'mock-self-hosted-model',
-  model: 'mixtral',
+  model: 'mistral',
   endpoint: 'https://mock-endpoint.com',
   apiToken: '',
 };
@@ -10,7 +10,7 @@ export const mockSelfHostedModelsList = [
   {
     id: 'gid://gitlab/Ai::SelfHostedModel/1',
     name: 'mock-self-hosted-model-1',
-    model: 'mixtral',
+    model: 'codellama',
     endpoint: 'https://mock-endpoint-1.com',
     hasApiToken: true,
   },
@@ -32,21 +32,10 @@ export const mockAiSelfHostedModelsQueryResponse = {
 };
 
 export const SELF_HOSTED_MODEL_OPTIONS = [
-  { modelValue: 'CODEGEMMA_2B', modelName: 'CodeGemma 2b' },
-  { modelValue: 'CODEGEMMA', modelName: 'CodeGemma 7b-it' },
-  { modelValue: 'CODEGEMMA_7B', modelName: 'CodeGemma 7b' },
-  { modelValue: 'CODELLAMA_13B_CODE', modelName: 'Code-Llama 13b-code' },
-  { modelValue: 'CODELLAMA', modelName: 'Code-Llama 13b' },
-  { modelValue: 'CODESTRAL', modelName: 'Codestral 22B' },
-  { modelValue: 'MISTRAL', modelName: 'Mistral 7B' },
-  { modelValue: 'MIXTRAL_8X22B', modelName: 'Mixtral 8x22B' },
-  { modelValue: 'MIXTRAL', modelName: 'Mixtral 8x7B' },
-  { modelValue: 'DEEPSEEKCODER', modelName: 'DEEPSEEKCODER' },
-  { modelValue: 'MISTRAL_TEXT', modelName: 'Mistral Text 7B' },
-  { modelValue: 'MIXTRAL_TEXT', modelName: 'Mixtral Text 8x7B' },
-  { modelValue: 'MIXTRAL_8X22B_TEXT', modelName: 'Mixtral Text 8X22B' },
-  { modelValue: 'LLAMA3', modelName: 'LLaMA 3 - 13B' },
-  { modelValue: 'LLAMA3_TEXT', modelName: 'LLaMA 3 - 13B Text' },
-  { modelValue: 'LLAMA3_70B', modelName: 'LLaMA 3 - 70B' },
-  { modelValue: 'LLAMA3_70B_TEXT', modelName: 'LLaMA 3 - 70B Text' },
+  { modelValue: 'CODEGEMMA', modelName: 'CodeGemma' },
+  { modelValue: 'CODELLAMA', modelName: 'Code-Llama' },
+  { modelValue: 'CODESTRAL', modelName: 'Codestral' },
+  { modelValue: 'MISTRAL', modelName: 'Mistral' },
+  { modelValue: 'DEEPSEEKCODER', modelName: 'Deepseek Coder' },
+  { modelValue: 'LLAMA3', modelName: 'LLaMA 3' },
 ];
diff --git a/ee/spec/frontend/pages/admin/ai/self_hosted_models/self_hosted_models_form_spec.js b/ee/spec/frontend/pages/admin/ai/self_hosted_models/self_hosted_models_form_spec.js
index 681197639ecc6e201cae845c8a9b39a8f0636d03..f941fc82bc845ce4bfef5f7ca6eddb181f340c8a 100644
--- a/ee/spec/frontend/pages/admin/ai/self_hosted_models/self_hosted_models_form_spec.js
+++ b/ee/spec/frontend/pages/admin/ai/self_hosted_models/self_hosted_models_form_spec.js
@@ -86,23 +86,12 @@ describe('SelfHostedModelForm', () => {
 
       const modelOptions = modelDropdownSelector.props('items');
       expect(modelOptions.map((model) => model.text)).toEqual([
-        'CodeGemma 2b',
-        'CodeGemma 7b-it',
-        'CodeGemma 7b',
-        'Code-Llama 13b-code',
-        'Code-Llama 13b',
-        'Codestral 22B',
-        'Mistral 7B',
-        'Mixtral 8x22B',
-        'Mixtral 8x7B',
-        'DEEPSEEKCODER',
-        'Mistral Text 7B',
-        'Mixtral Text 8x7B',
-        'Mixtral Text 8X22B',
-        'LLaMA 3 - 13B',
-        'LLaMA 3 - 13B Text',
-        'LLaMA 3 - 70B',
-        'LLaMA 3 - 70B Text',
+        'CodeGemma',
+        'Code-Llama',
+        'Codestral',
+        'Mistral',
+        'Deepseek Coder',
+        'LLaMA 3',
       ]);
     });
 
@@ -149,7 +138,7 @@ describe('SelfHostedModelForm', () => {
       it('renders an error message', async () => {
         await findNameInputField().setValue('test deployment');
         await findEndpointInputField().setValue('http://test.com');
-        await findCollapsibleListBox().vm.$emit('select', 'MIXTRAL');
+        await findCollapsibleListBox().vm.$emit('select', 'MISTRAL');
 
         wrapper.find('form').trigger('submit.prevent');
 
@@ -180,7 +169,7 @@ describe('SelfHostedModelForm', () => {
       it('renders an error message', async () => {
         await findNameInputField().setValue('test deployment');
         await findEndpointInputField().setValue('invalid endpoint');
-        await findCollapsibleListBox().vm.$emit('select', 'MIXTRAL');
+        await findCollapsibleListBox().vm.$emit('select', 'MISTRAL');
 
         wrapper.find('form').trigger('submit.prevent');
 
@@ -201,7 +190,7 @@ describe('SelfHostedModelForm', () => {
 
         await findNameInputField().setValue('test deployment');
         await findEndpointInputField().setValue('http://test.com');
-        await findCollapsibleListBox().vm.$emit('select', 'MIXTRAL');
+        await findCollapsibleListBox().vm.$emit('select', 'MISTRAL');
 
         wrapper.find('form').trigger('submit.prevent');
 
@@ -228,7 +217,7 @@ describe('SelfHostedModelForm', () => {
     it('invokes the create mutation with correct input variables', async () => {
       await findNameInputField().setValue('test deployment');
       await findEndpointInputField().setValue('http://test.com');
-      await findCollapsibleListBox().vm.$emit('select', 'MIXTRAL');
+      await findCollapsibleListBox().vm.$emit('select', 'MISTRAL');
 
       wrapper.find('form').trigger('submit.prevent');
 
@@ -238,7 +227,7 @@ describe('SelfHostedModelForm', () => {
         input: {
           name: 'test deployment',
           endpoint: 'http://test.com',
-          model: 'MIXTRAL',
+          model: 'MISTRAL',
           apiToken: '',
         },
       });
@@ -277,7 +266,7 @@ describe('SelfHostedModelForm', () => {
     it('invokes the update mutation with correct input variables', async () => {
       await findNameInputField().setValue('test deployment');
       await findEndpointInputField().setValue('http://test.com');
-      await findCollapsibleListBox().vm.$emit('select', 'MIXTRAL');
+      await findCollapsibleListBox().vm.$emit('select', 'MISTRAL');
       await findApiKeyInputField().vm.$emit('input', 'abc123');
 
       wrapper.find('form').trigger('submit.prevent');
@@ -289,7 +278,7 @@ describe('SelfHostedModelForm', () => {
           id: mockModelData.id,
           name: 'test deployment',
           endpoint: 'http://test.com',
-          model: 'MIXTRAL',
+          model: 'MISTRAL',
           apiToken: 'abc123',
         },
       });
diff --git a/ee/spec/frontend/pages/admin/ai/self_hosted_models/self_hosted_models_table_spec.js b/ee/spec/frontend/pages/admin/ai/self_hosted_models/self_hosted_models_table_spec.js
index 27b8c6d21d5131a6ec14430ab6faa36c6c1d6537..0b439725e84491f9676955d80201d4d0de44a49d 100644
--- a/ee/spec/frontend/pages/admin/ai/self_hosted_models/self_hosted_models_table_spec.js
+++ b/ee/spec/frontend/pages/admin/ai/self_hosted_models/self_hosted_models_table_spec.js
@@ -56,7 +56,7 @@ describe('SelfHostedModelsTable', () => {
     const firstModel = findNthTableRow(0);
 
     expect(firstModel.text()).toContain('mock-self-hosted-model-1');
-    expect(firstModel.text()).toContain('mixtral');
+    expect(firstModel.text()).toContain('codellama');
     expect(firstModel.text()).toContain('https://mock-endpoint-1.com');
     expect(firstModel.find('[data-testid="check-circle-icon"]').exists()).toBe(true);
   });
diff --git a/ee/spec/graphql/types/ai/self_hosted_models/accepted_models_enum_spec.rb b/ee/spec/graphql/types/ai/self_hosted_models/accepted_models_enum_spec.rb
index f04b3e7ef9cc68438088202d5c28175a872f4da6..9aa5f48e07451ce79258d3fbbd5766332fa7bf66 100644
--- a/ee/spec/graphql/types/ai/self_hosted_models/accepted_models_enum_spec.rb
+++ b/ee/spec/graphql/types/ai/self_hosted_models/accepted_models_enum_spec.rb
@@ -6,16 +6,13 @@
   it { expect(described_class.graphql_name).to eq('AiAcceptedSelfHostedModels') }
 
   it 'exposes all the curated LLMs for self-hosted feature' do
-    expect(described_class.values.keys).to include(*%w[
-      CODEGEMMA_2B
+    expect(described_class.values.keys).to match_array(%w[
       CODEGEMMA
-      CODEGEMMA_7B
-      CODELLAMA_13B_CODE
       CODELLAMA
       CODESTRAL
       MISTRAL
-      MIXTRAL_8X22B
-      MIXTRAL
+      DEEPSEEKCODER
+      LLAMA3
     ])
   end
 end
diff --git a/ee/spec/lib/code_suggestions/tasks/code_completion_spec.rb b/ee/spec/lib/code_suggestions/tasks/code_completion_spec.rb
index ef631976243a7cbd8fd8f535f15b48cd562a837a..9e54c46060672801b694e5c5eede179527a44371 100644
--- a/ee/spec/lib/code_suggestions/tasks/code_completion_spec.rb
+++ b/ee/spec/lib/code_suggestions/tasks/code_completion_spec.rb
@@ -115,7 +115,7 @@
 
     it_behaves_like 'code suggestion task' do
       let_it_be(:ai_self_hosted_model) do
-        create(:ai_self_hosted_model, model: :codellama_13b_code, name: 'whatever')
+        create(:ai_self_hosted_model, model: :codellama, name: 'whatever')
       end
 
       let_it_be(:ai_feature_setting) do
@@ -139,8 +139,8 @@
           "prompt_version" => 2,
           "prompt" => nil,
           "model_endpoint" => "http://localhost:11434/v1",
-          "model_name" => "codellama_13b_code",
           "model_identifier" => "provider/some-model",
+          "model_name" => "codellama",
           "model_api_key" => "token"
         }
       end
diff --git a/ee/spec/models/ai/feature_setting_spec.rb b/ee/spec/models/ai/feature_setting_spec.rb
index fbd91364566badea30ce316a652e9a970dd7abe3..e302b8e92ce2f07f6328e272510710b93050b595 100644
--- a/ee/spec/models/ai/feature_setting_spec.rb
+++ b/ee/spec/models/ai/feature_setting_spec.rb
@@ -62,7 +62,7 @@
     end
 
     let_it_be(:other_self_hosted_model) do
-      create(:ai_self_hosted_model, name: 'other_model', model: :mixtral)
+      create(:ai_self_hosted_model, name: 'other_model', model: :codegemma)
     end
 
     let_it_be(:other_feature_setting) do
@@ -139,7 +139,7 @@
 
     context 'when feature metadata exists' do
       let(:feature_metadata) do
-        { 'title' => 'Duo Chat', 'main_feature' => 'duo_chat', 'compatible_llms' => ['mixtral_8x22b'],
+        { 'title' => 'Duo Chat', 'main_feature' => 'duo_chat', 'compatible_llms' => ['codellama'],
           'release_state' => 'BETA' }
       end
 
@@ -149,7 +149,7 @@
         expect(metadata).to be_an_instance_of(Ai::FeatureSetting::FeatureMetadata)
         expect(metadata.title).to eq('Duo Chat')
         expect(metadata.main_feature).to eq('duo_chat')
-        expect(metadata.compatible_llms).to eq(['mixtral_8x22b'])
+        expect(metadata.compatible_llms).to eq(['codellama'])
         expect(metadata.release_state).to eq('BETA')
       end
     end
@@ -170,7 +170,7 @@
   end
 
   describe '#compatible_self_hosted_models' do
-    let_it_be(:llm_names) { %w[codegemma_2b deepseekcoder mixtral codellama_13b_code] }
+    let_it_be(:llm_names) { %w[codegemma deepseekcoder mistral codellama] }
     let_it_be(:models) do
       llm_names.map do |llm_name|
         create(:ai_self_hosted_model, name: "vllm_#{llm_name}", model: llm_name)
@@ -188,7 +188,7 @@
     context 'with compatible LLMs assigned to the feature' do
       let(:feature_metadata) do
         { 'title' => 'Code Generation', 'main_feature' => 'Code Suggestion',
-          'compatible_llms' => %w[deepseekcoder codellama_13b_code], 'release_state' => 'GA' }
+          'compatible_llms' => %w[deepseekcoder codellama], 'release_state' => 'GA' }
       end
 
       it 'returns the compatible self-hosted models' do
@@ -244,7 +244,7 @@
       end
 
       context 'when compatible_llms is present' do
-        let(:compatible_llms) { %w[mistral mixtral_8x22b mixtral] }
+        let(:compatible_llms) { %w[mistral deepseekcoder codegemma] }
 
         before do
           allow(feature_setting).to receive(:compatible_llms).and_return(compatible_llms)
@@ -260,7 +260,7 @@
 
         context 'when self_hosted_model is not compatible' do
           it 'adds an error message' do
-            incompatible_model = :codegemma_7b
+            incompatible_model = :codellama
             self_hosted_model.model = incompatible_model
             feature_setting.validate
             expect(feature_setting.errors[:base])
diff --git a/ee/spec/requests/admin/ai/self_hosted_models_controller_spec.rb b/ee/spec/requests/admin/ai/self_hosted_models_controller_spec.rb
index 44e6b5bf05f4f6e5b1d24f63d6d0b1a1402e0c40..cb2aaae2c02acb31551892d91675e428fb2b2414 100644
--- a/ee/spec/requests/admin/ai/self_hosted_models_controller_spec.rb
+++ b/ee/spec/requests/admin/ai/self_hosted_models_controller_spec.rb
@@ -97,7 +97,7 @@
   describe 'GET #edit' do
     let(:page) { Nokogiri::HTML(response.body) }
     let(:self_hosted_model) do
-      create(:ai_self_hosted_model, model: :mixtral, api_token: nil)
+      create(:ai_self_hosted_model, model: :mistral, api_token: nil)
     end
 
     subject :perform_request do
@@ -110,7 +110,7 @@
       expect(response).to have_gitlab_http_status(:ok)
 
       expect(page.at('#self_hosted_model_name')['value']).to eq(self_hosted_model.name)
-      expect(page.at('#self_hosted_model_model option[@selected="selected"]')['value']).to eq('mixtral')
+      expect(page.at('#self_hosted_model_model option[@selected="selected"]')['value']).to eq('mistral')
       expect(page.at('#self_hosted_model_endpoint')['value']).to eq(self_hosted_model.endpoint)
     end
 
@@ -145,7 +145,7 @@
       {
         self_hosted_model: {
           name: 'test',
-          model: :mixtral,
+          model: :mistral,
           endpoint: 'https://example.com'
         }
       }
@@ -160,7 +160,7 @@
 
       self_hosted_model = ::Ai::SelfHostedModel.last
       expect(self_hosted_model.name).to eq 'test'
-      expect(self_hosted_model.model).to eq 'mixtral'
+      expect(self_hosted_model.model).to eq 'mistral'
       expect(self_hosted_model.endpoint).to eq 'https://example.com'
 
       expect(response).to redirect_to(admin_ai_self_hosted_models_url)
@@ -172,7 +172,7 @@
 
   describe 'PATCH #update' do
     let(:self_hosted_model) do
-      create(:ai_self_hosted_model, name: 'test', model: :mixtral, api_token: 'did_not_change')
+      create(:ai_self_hosted_model, name: 'test', model: :mistral, api_token: 'did_not_change')
     end
 
     subject :perform_request do
diff --git a/ee/spec/requests/api/graphql/ai/self_hosted_models/self_hosted_models_spec.rb b/ee/spec/requests/api/graphql/ai/self_hosted_models/self_hosted_models_spec.rb
index 8a579cd73f3defaed60dd108f66c12d4fcb4a43a..6a5528f9a3d9946c0c47b97b7f924ecc6636d09a 100644
--- a/ee/spec/requests/api/graphql/ai/self_hosted_models/self_hosted_models_spec.rb
+++ b/ee/spec/requests/api/graphql/ai/self_hosted_models/self_hosted_models_spec.rb
@@ -10,8 +10,8 @@
   let! :model_params do
     [
       { name: 'ollama1-mistral', model: :mistral },
-      { name: 'vllm-mixtral', model: :mixtral, api_token: "test_api_token" },
-      { name: 'ollama2-mistral', model: :mistral }
+      { name: 'vllm-mistral', model: :mistral, api_token: "test_api_token" },
+      { name: 'ollama2-codegemma', model: :codegemma }
     ]
   end
 
diff --git a/ee/spec/requests/api/graphql/ai/self_hosted_models/update_spec.rb b/ee/spec/requests/api/graphql/ai/self_hosted_models/update_spec.rb
index b81b84bc87492a2675697c3bbef0950eb990b737..7cab43557cce3eb0f812b48e00c23f155e65c773 100644
--- a/ee/spec/requests/api/graphql/ai/self_hosted_models/update_spec.rb
+++ b/ee/spec/requests/api/graphql/ai/self_hosted_models/update_spec.rb
@@ -21,7 +21,7 @@
     {
       id: GitlabSchema.id_from_object(self_hosted_model).to_s,
       name: 'new-test-deployment',
-      model: 'MIXTRAL',
+      model: 'CODEGEMMA',
       endpoint: 'https://new-test-endpoint.com',
       api_token: '',
       identifier: 'provider/some-model-2'
@@ -56,7 +56,7 @@
           {
             id: GitlabSchema.id_from_object(self_hosted_model).to_s,
             name: '',
-            model: 'MIXTRAL',
+            model: 'CODEGEMMA',
             endpoint: 'https://new-test-endpoint.com',
             api_token: '',
             identifier: 'provider/some-model-2'
@@ -98,7 +98,7 @@
           self_hosted_model.reload
 
           expect(self_hosted_model.name).to eq('new-test-deployment')
-          expect(self_hosted_model.model).to eq('mixtral')
+          expect(self_hosted_model.model).to eq('codegemma')
           expect(self_hosted_model.endpoint).to eq('https://new-test-endpoint.com')
           expect(self_hosted_model.identifier).to eq('provider/some-model-2')
         end