diff --git a/app/controllers/projects/logs_controller.rb b/app/controllers/projects/logs_controller.rb
deleted file mode 100644
index 0f751db2064a4dcaf5ab196502b595aabbfaf08b..0000000000000000000000000000000000000000
--- a/app/controllers/projects/logs_controller.rb
+++ /dev/null
@@ -1,103 +0,0 @@
-# frozen_string_literal: true
-
-module Projects
-  class LogsController < Projects::ApplicationController
-    include ::Gitlab::Utils::StrongMemoize
-
-    before_action :authorize_read_pod_logs!
-    before_action :ensure_deployments, only: %i(k8s elasticsearch)
-
-    feature_category :logging
-    urgency :low
-
-    def index
-      return render_404 unless Feature.enabled?(:monitor_logging, project)
-
-      if environment || cluster
-        render :index
-      else
-        render :empty_logs
-      end
-    end
-
-    def k8s
-      render_logs(::PodLogs::KubernetesService, k8s_params)
-    end
-
-    def elasticsearch
-      render_logs(::PodLogs::ElasticsearchService, elasticsearch_params)
-    end
-
-    private
-
-    def render_logs(service, permitted_params)
-      ::Gitlab::PollingInterval.set_header(response, interval: 3_000)
-
-      result = service.new(cluster, namespace, params: permitted_params).execute
-
-      if result.nil?
-        head :accepted
-      elsif result[:status] == :success
-        render json: result
-      else
-        render status: :bad_request, json: result
-      end
-    end
-
-    # cluster is selected either via environment or directly by id
-    def cluster_params
-      params.permit(:environment_name, :cluster_id)
-    end
-
-    def k8s_params
-      params.permit(:container_name, :pod_name)
-    end
-
-    def elasticsearch_params
-      params.permit(:container_name, :pod_name, :search, :start_time, :end_time, :cursor)
-    end
-
-    def environment
-      strong_memoize(:environment) do
-        if cluster_params.key?(:environment_name)
-          ::Environments::EnvironmentsFinder.new(project, current_user, name: cluster_params[:environment_name]).execute.first
-        else
-          project.default_environment
-        end
-      end
-    end
-
-    def cluster
-      strong_memoize(:cluster) do
-        if gitlab_managed_apps_logs?
-          clusters = ClusterAncestorsFinder.new(project, current_user).execute
-          clusters.find { |cluster| cluster.id == cluster_params[:cluster_id].to_i }
-        else
-          environment&.deployment_platform&.cluster
-        end
-      end
-    end
-
-    def namespace
-      if gitlab_managed_apps_logs?
-        Gitlab::Kubernetes::Helm::NAMESPACE
-      else
-        environment.deployment_namespace
-      end
-    end
-
-    def ensure_deployments
-      return if gitlab_managed_apps_logs?
-      return if cluster && namespace.present?
-
-      render status: :bad_request, json: {
-        status: :error,
-        message: _('Environment does not have deployments')
-      }
-    end
-
-    def gitlab_managed_apps_logs?
-      cluster_params.key?(:cluster_id)
-    end
-  end
-end
diff --git a/app/serializers/environment_entity.rb b/app/serializers/environment_entity.rb
index ac99463bd642d57630f78481e06bb3a36bc22ce1..3473b4aebc87bbbb9e161b93c31a8bfcbac9bf2c 100644
--- a/app/serializers/environment_entity.rb
+++ b/app/serializers/environment_entity.rb
@@ -66,22 +66,6 @@ class EnvironmentEntity < Grape::Entity
     environment.available? && can?(current_user, :stop_environment, environment)
   end
 
-  expose :logs_path, if: -> (*) { can_read_pod_logs? } do |environment|
-    project_logs_path(environment.project, environment_name: environment.name)
-  end
-
-  expose :logs_api_path, if: -> (*) { can_read_pod_logs? } do |environment|
-    if environment.elastic_stack_available?
-      elasticsearch_project_logs_path(environment.project, environment_name: environment.name, format: :json)
-    else
-      k8s_project_logs_path(environment.project, environment_name: environment.name, format: :json)
-    end
-  end
-
-  expose :enable_advanced_logs_querying, if: -> (*) { can_read_pod_logs? } do |environment|
-    environment.elastic_stack_available?
-  end
-
   expose :can_delete do |environment|
     can?(current_user, :destroy_environment, environment)
   end
@@ -102,11 +86,6 @@ def can_update_environment?
     can?(current_user, :update_environment, environment)
   end
 
-  def can_read_pod_logs?
-    Feature.enabled?(:monitor_logging, environment.project) &&
-      can?(current_user, :read_pod_logs, environment.project)
-  end
-
   def can_read_deploy_board?
     can?(current_user, :read_deploy_board, environment.project)
   end
diff --git a/app/services/pod_logs/base_service.rb b/app/services/pod_logs/base_service.rb
deleted file mode 100644
index e4b6ad31e339c6f6801513c12abfb5e3e2bfd275..0000000000000000000000000000000000000000
--- a/app/services/pod_logs/base_service.rb
+++ /dev/null
@@ -1,91 +0,0 @@
-# frozen_string_literal: true
-
-module PodLogs
-  class BaseService < ::BaseService
-    include ReactiveCaching
-    include Stepable
-
-    attr_reader :cluster, :namespace, :params
-
-    CACHE_KEY_GET_POD_LOG = 'get_pod_log'
-    K8S_NAME_MAX_LENGTH = 253
-
-    self.reactive_cache_work_type = :external_dependency
-
-    def id
-      cluster.id
-    end
-
-    def initialize(cluster, namespace, params: {})
-      @cluster = cluster
-      @namespace = namespace
-      @params = filter_params(params.dup.stringify_keys).to_hash
-    end
-
-    def execute
-      with_reactive_cache(
-        CACHE_KEY_GET_POD_LOG,
-        namespace,
-        params
-      ) do |result|
-        result
-      end
-    end
-
-    def calculate_reactive_cache(request, _namespace, _params)
-      case request
-      when CACHE_KEY_GET_POD_LOG
-        execute_steps
-      else
-        exception = StandardError.new('Unknown reactive cache request')
-        Gitlab::ErrorTracking.track_and_raise_for_dev_exception(exception, request: request)
-        error(_('Unknown cache key'))
-      end
-    end
-
-    private
-
-    def valid_params
-      %w(pod_name container_name)
-    end
-
-    def success_return_keys
-      %i(status logs pod_name container_name pods)
-    end
-
-    def check_arguments(result)
-      return error(_('Cluster does not exist')) if cluster.nil?
-      return error(_('Namespace is empty')) if namespace.blank?
-
-      result[:pod_name] = params['pod_name'].presence
-      result[:container_name] = params['container_name'].presence
-
-      return error(_('Invalid pod_name')) if result[:pod_name] && !result[:pod_name].is_a?(String)
-      return error(_('Invalid container_name')) if result[:container_name] && !result[:container_name].is_a?(String)
-
-      success(result)
-    end
-
-    def get_raw_pods(result)
-      raise NotImplementedError
-    end
-
-    def get_pod_names(result)
-      result[:pods] = result[:raw_pods].map { |p| p[:name] }
-
-      success(result)
-    end
-
-    def pod_logs(result)
-      raise NotImplementedError
-    end
-
-    def filter_return_keys(result)
-      result.slice(*success_return_keys)
-    end
-
-    def filter_params(params)
-      params.slice(*valid_params)
-    end
-  end
-end
diff --git a/app/services/pod_logs/elasticsearch_service.rb b/app/services/pod_logs/elasticsearch_service.rb
deleted file mode 100644
index 28ccace62e591993389c666f8c78a06412b1ab03..0000000000000000000000000000000000000000
--- a/app/services/pod_logs/elasticsearch_service.rb
+++ /dev/null
@@ -1,98 +0,0 @@
-# frozen_string_literal: true
-
-module PodLogs
-  class ElasticsearchService < PodLogs::BaseService
-    steps :check_arguments,
-      :get_raw_pods,
-      :get_pod_names,
-      :check_times,
-      :check_search,
-      :check_cursor,
-      :pod_logs,
-      :filter_return_keys
-
-    self.reactive_cache_worker_finder = ->(id, _cache_key, namespace, params) { new(::Clusters::Cluster.find(id), namespace, params: params) }
-
-    private
-
-    def valid_params
-      super + %w(search start_time end_time cursor)
-    end
-
-    def success_return_keys
-      super + %i(cursor)
-    end
-
-    def get_raw_pods(result)
-      client = cluster&.elasticsearch_client
-      return error(_('Unable to connect to Elasticsearch')) unless client
-
-      result[:raw_pods] = ::Gitlab::Elasticsearch::Logs::Pods.new(client).pods(namespace)
-
-      success(result)
-    rescue Elasticsearch::Transport::Transport::ServerError => e
-      ::Gitlab::ErrorTracking.track_exception(e)
-
-      error(_('Elasticsearch returned status code: %{status_code}') % {
-        # ServerError is the parent class of exceptions named after HTTP status codes, eg: "Elasticsearch::Transport::Transport::Errors::NotFound"
-        # there is no method on the exception other than the class name to determine the type of error encountered.
-        status_code: e.class.name.split('::').last
-      })
-    end
-
-    def check_times(result)
-      result[:start_time] = params['start_time'] if params.key?('start_time') && Time.iso8601(params['start_time'])
-      result[:end_time] = params['end_time'] if params.key?('end_time') && Time.iso8601(params['end_time'])
-
-      success(result)
-    rescue ArgumentError
-      error(_('Invalid start or end time format'))
-    end
-
-    def check_search(result)
-      result[:search] = params['search'] if params.key?('search')
-
-      return error(_('Invalid search parameter')) if result[:search] && !result[:search].is_a?(String)
-
-      success(result)
-    end
-
-    def check_cursor(result)
-      result[:cursor] = params['cursor'] if params.key?('cursor')
-
-      return error(_('Invalid cursor parameter')) if result[:cursor] && !result[:cursor].is_a?(String)
-
-      success(result)
-    end
-
-    def pod_logs(result)
-      client = cluster&.elasticsearch_client
-      return error(_('Unable to connect to Elasticsearch')) unless client
-
-      response = ::Gitlab::Elasticsearch::Logs::Lines.new(client).pod_logs(
-        namespace,
-        pod_name: result[:pod_name],
-        container_name: result[:container_name],
-        search: result[:search],
-        start_time: result[:start_time],
-        end_time: result[:end_time],
-        cursor: result[:cursor],
-        chart_above_v2: cluster.elastic_stack_adapter.chart_above_v2?
-      )
-
-      result.merge!(response)
-
-      success(result)
-    rescue Elasticsearch::Transport::Transport::ServerError => e
-      ::Gitlab::ErrorTracking.track_exception(e)
-
-      error(_('Elasticsearch returned status code: %{status_code}') % {
-        # ServerError is the parent class of exceptions named after HTTP status codes, eg: "Elasticsearch::Transport::Transport::Errors::NotFound"
-        # there is no method on the exception other than the class name to determine the type of error encountered.
-        status_code: e.class.name.split('::').last
-      })
-    rescue ::Gitlab::Elasticsearch::Logs::Lines::InvalidCursor
-      error(_('Invalid cursor value provided'))
-    end
-  end
-end
diff --git a/app/services/pod_logs/kubernetes_service.rb b/app/services/pod_logs/kubernetes_service.rb
deleted file mode 100644
index 28b1a17963574ed6d181de94e48590dd117ebe67..0000000000000000000000000000000000000000
--- a/app/services/pod_logs/kubernetes_service.rb
+++ /dev/null
@@ -1,151 +0,0 @@
-# frozen_string_literal: true
-
-module PodLogs
-  class KubernetesService < PodLogs::BaseService
-    LOGS_LIMIT = 500
-    REPLACEMENT_CHAR = "\u{FFFD}"
-
-    EncodingHelperError = Class.new(StandardError)
-
-    steps :check_arguments,
-      :get_raw_pods,
-      :get_pod_names,
-      :check_pod_name,
-      :check_container_name,
-      :pod_logs,
-      :encode_logs_to_utf8,
-      :split_logs,
-      :filter_return_keys
-
-    self.reactive_cache_worker_finder = ->(id, _cache_key, namespace, params) { new(::Clusters::Cluster.find(id), namespace, params: params) }
-
-    private
-
-    def get_raw_pods(result)
-      result[:raw_pods] = cluster.kubeclient.get_pods(namespace: namespace).map do |pod|
-        {
-          name: pod.metadata.name,
-          container_names: pod.spec.containers.map(&:name)
-        }
-      end
-
-      success(result)
-    end
-
-    def check_pod_name(result)
-      # If pod_name is not received as parameter, get the pod logs of the first
-      # pod of this namespace.
-      result[:pod_name] ||= result[:pods].first
-
-      unless result[:pod_name]
-        return error(_('No pods available'))
-      end
-
-      unless result[:pod_name].length.to_i <= K8S_NAME_MAX_LENGTH
-        return error(_('pod_name cannot be larger than %{max_length}'\
-          ' chars' % { max_length: K8S_NAME_MAX_LENGTH }))
-      end
-
-      unless result[:pod_name] =~ Gitlab::Regex.kubernetes_dns_subdomain_regex
-        return error(_('pod_name can contain only lowercase letters, digits, \'-\', and \'.\' and must start and end with an alphanumeric character'))
-      end
-
-      unless result[:pods].include?(result[:pod_name])
-        return error(_('Pod does not exist'))
-      end
-
-      success(result)
-    end
-
-    def check_container_name(result)
-      pod_details = result[:raw_pods].find { |p| p[:name] == result[:pod_name] }
-      container_names = pod_details[:container_names]
-
-      # select first container if not specified
-      result[:container_name] ||= container_names.first
-
-      unless result[:container_name]
-        return error(_('No containers available'))
-      end
-
-      unless result[:container_name].length.to_i <= K8S_NAME_MAX_LENGTH
-        return error(_('container_name cannot be larger than'\
-          ' %{max_length} chars' % { max_length: K8S_NAME_MAX_LENGTH }))
-      end
-
-      unless result[:container_name] =~ Gitlab::Regex.kubernetes_dns_subdomain_regex
-        return error(_('container_name can contain only lowercase letters, digits, \'-\', and \'.\' and must start and end with an alphanumeric character'))
-      end
-
-      unless container_names.include?(result[:container_name])
-        return error(_('Container does not exist'))
-      end
-
-      success(result)
-    end
-
-    def pod_logs(result)
-      result[:logs] = cluster.kubeclient.get_pod_log(
-        result[:pod_name],
-        namespace,
-        container: result[:container_name],
-        tail_lines: LOGS_LIMIT,
-        timestamps: true
-      ).body
-
-      success(result)
-    rescue Kubeclient::ResourceNotFoundError
-      error(_('Pod not found'))
-    rescue Kubeclient::HttpError => e
-      ::Gitlab::ErrorTracking.track_exception(e)
-
-      error(_('Kubernetes API returned status code: %{error_code}') % {
-        error_code: e.error_code
-      })
-    end
-
-    # Check https://gitlab.com/gitlab-org/gitlab/issues/34965#note_292261879
-    # for more details on why this is necessary.
-    def encode_logs_to_utf8(result)
-      return success(result) if result[:logs].nil?
-      return success(result) if result[:logs].encoding == Encoding::UTF_8
-
-      result[:logs] = encode_utf8(result[:logs])
-
-      success(result)
-    rescue EncodingHelperError
-      error(_('Unable to convert Kubernetes logs encoding to UTF-8'))
-    end
-
-    def split_logs(result)
-      result[:logs] = result[:logs].strip.lines(chomp: true).map do |line|
-        # message contains a RFC3339Nano timestamp, then a space, then the log line.
-        # resolution of the nanoseconds can vary, so we split on the first space
-        values = line.split(' ', 2)
-        {
-          timestamp: values[0],
-          message: values[1],
-          pod: result[:pod_name]
-        }
-      end
-
-      success(result)
-    end
-
-    def encode_utf8(logs)
-      utf8_logs = Gitlab::EncodingHelper.encode_utf8(logs.dup, replace: REPLACEMENT_CHAR)
-
-      # Gitlab::EncodingHelper.encode_utf8 can return '' or nil if an exception
-      # is raised while encoding. We prefer to return an error rather than wrongly
-      # display blank logs.
-      no_utf8_logs = logs.present? && utf8_logs.blank?
-      unexpected_encoding = utf8_logs&.encoding != Encoding::UTF_8
-
-      if no_utf8_logs || unexpected_encoding
-        raise EncodingHelperError, 'Could not convert Kubernetes logs to UTF-8'
-      end
-
-      utf8_logs
-    end
-  end
-end
diff --git a/lib/gitlab/elasticsearch/logs/lines.rb b/lib/gitlab/elasticsearch/logs/lines.rb
deleted file mode 100644
index ff9185dd331dd593cb2cca49d6a9c71440dddcb1..0000000000000000000000000000000000000000
--- a/lib/gitlab/elasticsearch/logs/lines.rb
+++ /dev/null
@@ -1,157 +0,0 @@
-# frozen_string_literal: true
-
-module Gitlab
-  module Elasticsearch
-    module Logs
-      class Lines
-        InvalidCursor = Class.new(RuntimeError)
-
-        # How many log lines to fetch in a query
-        LOGS_LIMIT = 500
-
-        def initialize(client)
-          @client = client
-        end
-
-        def pod_logs(namespace, pod_name: nil, container_name: nil, search: nil, start_time: nil, end_time: nil, cursor: nil, chart_above_v2: true)
-          query = { bool: { must: [] } }.tap do |q|
-            filter_pod_name(q, pod_name)
-            filter_namespace(q, namespace)
-            filter_container_name(q, container_name)
-            filter_search(q, search)
-            filter_times(q, start_time, end_time)
-          end
-
-          body = build_body(query, cursor, chart_above_v2)
-          response = @client.search body: body
-
-          format_response(response)
-        end
-
-        private
-
-        def build_body(query, cursor = nil, chart_above_v2 = true)
-          offset_field = chart_above_v2 ? "log.offset" : "offset"
-          body = {
-            query: query,
-            # reverse order so we can query N-most recent records
-            sort: [
-              { "@timestamp": { order: :desc } },
-              { "#{offset_field}": { order: :desc } }
-            ],
-            # only return these fields in the response
-            _source: ["@timestamp", "message", "kubernetes.pod.name"],
-            # fixed limit for now, we should support paginated queries
-            size: ::Gitlab::Elasticsearch::Logs::Lines::LOGS_LIMIT
-          }
-
-          unless cursor.nil?
-            body[:search_after] = decode_cursor(cursor)
-          end
-
-          body
-        end
-
-        def filter_pod_name(query, pod_name)
-          # We can filter by "all pods" with a null pod_name
-          return if pod_name.nil?
-
-          query[:bool][:must] << {
-            match_phrase: {
-              "kubernetes.pod.name" => {
-                query: pod_name
-              }
-            }
-          }
-        end
-
-        def filter_namespace(query, namespace)
-          query[:bool][:must] << {
-            match_phrase: {
-              "kubernetes.namespace" => {
-                query: namespace
-              }
-            }
-          }
-        end
-
-        def filter_container_name(query, container_name)
-          # A pod can contain multiple containers.
-          # By default we return logs from every container
-          return if container_name.nil?
-
-          query[:bool][:must] << {
-            match_phrase: {
-              "kubernetes.container.name" => {
-                query: container_name
-              }
-            }
-          }
-        end
-
-        def filter_search(query, search)
-          return if search.nil?
-
-          query[:bool][:must] << {
-            simple_query_string: {
-              query: search,
-              fields: [:message],
-              default_operator: :and
-            }
-          }
-        end
-
-        def filter_times(query, start_time, end_time)
-          return unless start_time || end_time
-
-          time_range = { range: { :@timestamp => {} } }.tap do |tr|
-            tr[:range][:@timestamp][:gte] = start_time if start_time
-            tr[:range][:@timestamp][:lt] = end_time if end_time
-          end
-
-          query[:bool][:filter] = [time_range]
-        end
-
-        def format_response(response)
-          results = response.fetch("hits", {}).fetch("hits", [])
-          last_result = results.last
-          results = results.map do |hit|
-            {
-              timestamp: hit["_source"]["@timestamp"],
-              message: hit["_source"]["message"],
-              pod: hit["_source"]["kubernetes"]["pod"]["name"]
-            }
-          end
-
-          # we queried for the N-most recent records but we want them ordered oldest to newest
-          {
-            logs: results.reverse,
-            cursor: last_result.nil? ? nil : encode_cursor(last_result["sort"])
-          }
-        end
-
-        # we want to hide the implementation details of the search_after parameter from the frontend
-        # behind a single easily transmitted value
-        def encode_cursor(obj)
-          obj.join(',')
-        end
-
-        def decode_cursor(obj)
-          cursor = obj.split(',').map(&:to_i)
-
-          unless valid_cursor(cursor)
-            raise InvalidCursor, "invalid cursor format"
-          end
-
-          cursor
-        end
-
-        def valid_cursor(cursor)
-          cursor.instance_of?(Array) &&
-            cursor.length == 2 &&
-            cursor.map {|i| i.instance_of?(Integer)}.reduce(:&)
-        end
-      end
-    end
-  end
-end
diff --git a/lib/gitlab/elasticsearch/logs/pods.rb b/lib/gitlab/elasticsearch/logs/pods.rb
deleted file mode 100644
index 66499ae956afe7a95c363bd20426fd920078f2d5..0000000000000000000000000000000000000000
--- a/lib/gitlab/elasticsearch/logs/pods.rb
+++ /dev/null
@@ -1,70 +0,0 @@
-# frozen_string_literal: true
-
-module Gitlab
-  module Elasticsearch
-    module Logs
-      class Pods
-        # How many items to fetch in a query
-        PODS_LIMIT = 500
-        CONTAINERS_LIMIT = 500
-
-        def initialize(client)
-          @client = client
-        end
-
-        def pods(namespace)
-          body = build_body(namespace)
-          response = @client.search body: body
-
-          format_response(response)
-        end
-
-        private
-
-        def build_body(namespace)
-          {
-            aggs: {
-              pods: {
-                aggs: {
-                  containers: {
-                    terms: {
-                      field: 'kubernetes.container.name',
-                      size: ::Gitlab::Elasticsearch::Logs::Pods::CONTAINERS_LIMIT
-                    }
-                  }
-                },
-                terms: {
-                  field: 'kubernetes.pod.name',
-                  size: ::Gitlab::Elasticsearch::Logs::Pods::PODS_LIMIT
-                }
-              }
-            },
-            query: {
-              bool: {
-                must: {
-                  match_phrase: {
-                    "kubernetes.namespace": namespace
-                  }
-                }
-              }
-            },
-            # don't populate hits, only the aggregation is needed
-            size: 0
-          }
-        end
-
-        def format_response(response)
-          results = response.dig("aggregations", "pods", "buckets") || []
-          results.map do |bucket|
-            {
-              name: bucket["key"],
-              container_names: (bucket.dig("containers", "buckets") || []).map do |cbucket|
-                cbucket["key"]
-              end
-            }
-          end
-        end
-      end
-    end
-  end
-end
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 5eefa0cd930581192d6857247b4b64920622fa1a..41edc63d96189dd2873dd8ab5403e9b24f77aec1 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -8261,9 +8261,6 @@ msgstr ""
 msgid "Cluster cache cleared."
 msgstr ""
 
-msgid "Cluster does not exist"
-msgstr ""
-
 msgid "Cluster is required for Stages::ClusterEndpointInserter"
 msgstr ""
 
@@ -9699,9 +9696,6 @@ msgstr ""
 msgid "Container Scanning"
 msgstr ""
 
-msgid "Container does not exist"
-msgstr ""
-
 msgid "Container must be a project or a group."
 msgstr ""
 
@@ -13915,9 +13909,6 @@ msgstr ""
 msgid "Elasticsearch reindexing was not started: %{errors}"
 msgstr ""
 
-msgid "Elasticsearch returned status code: %{status_code}"
-msgstr ""
-
 msgid "Elasticsearch zero-downtime reindexing"
 msgstr ""
 
@@ -14374,9 +14365,6 @@ msgstr ""
 msgid "Environment"
 msgstr ""
 
-msgid "Environment does not have deployments"
-msgstr ""
-
 msgid "Environment is required for Stages::MetricEndpointInserter"
 msgstr ""
 
@@ -20922,15 +20910,6 @@ msgstr ""
 msgid "Invalid URL: %{url}"
 msgstr ""
 
-msgid "Invalid container_name"
-msgstr ""
-
-msgid "Invalid cursor parameter"
-msgstr ""
-
-msgid "Invalid cursor value provided"
-msgstr ""
-
 msgid "Invalid date"
 msgstr ""
 
@@ -20973,9 +20952,6 @@ msgstr ""
 msgid "Invalid pin code."
 msgstr ""
 
-msgid "Invalid pod_name"
-msgstr ""
-
 msgid "Invalid policy type"
 msgstr ""
 
@@ -20988,15 +20964,9 @@ msgstr ""
 msgid "Invalid rule"
 msgstr ""
 
-msgid "Invalid search parameter"
-msgstr ""
-
 msgid "Invalid server response"
 msgstr ""
 
-msgid "Invalid start or end time format"
-msgstr ""
-
 msgid "Invalid status"
 msgstr ""
 
@@ -22359,9 +22329,6 @@ msgstr ""
 msgid "Kubernetes"
 msgstr ""
 
-msgid "Kubernetes API returned status code: %{error_code}"
-msgstr ""
-
 msgid "Kubernetes Cluster"
 msgstr ""
 
@@ -25245,9 +25212,6 @@ msgstr ""
 msgid "Namespace ID:"
 msgstr ""
 
-msgid "Namespace is empty"
-msgstr ""
-
 msgid "Namespace:"
 msgstr ""
 
@@ -25720,9 +25684,6 @@ msgstr ""
 msgid "No connection could be made to a Gitaly Server, please check your logs!"
 msgstr ""
 
-msgid "No containers available"
-msgstr ""
-
 msgid "No contributions"
 msgstr ""
 
@@ -25858,9 +25819,6 @@ msgstr ""
 msgid "No plan"
 msgstr ""
 
-msgid "No pods available"
-msgstr ""
-
 msgid "No policy matches this license"
 msgstr ""
 
@@ -28869,12 +28827,6 @@ msgstr ""
 msgid "Please wait while we import the repository for you. Refresh at will."
 msgstr ""
 
-msgid "Pod does not exist"
-msgstr ""
-
-msgid "Pod not found"
-msgstr ""
-
 msgid "Pods in use"
 msgstr ""
 
@@ -40763,9 +40715,6 @@ msgstr ""
 msgid "Unable to collect memory info"
 msgstr ""
 
-msgid "Unable to connect to Elasticsearch"
-msgstr ""
-
 msgid "Unable to connect to Prometheus server"
 msgstr ""
 
@@ -40775,9 +40724,6 @@ msgstr ""
 msgid "Unable to connect to the Jira instance. Please check your Jira integration configuration."
 msgstr ""
 
-msgid "Unable to convert Kubernetes logs encoding to UTF-8"
-msgstr ""
-
 msgid "Unable to create link to vulnerability"
 msgstr ""
 
@@ -40916,9 +40862,6 @@ msgstr ""
 msgid "Unknown Error"
 msgstr ""
 
-msgid "Unknown cache key"
-msgstr ""
-
 msgid "Unknown encryption strategy: %{encrypted_strategy}!"
 msgstr ""
 
@@ -45274,12 +45217,6 @@ msgstr ""
 msgid "container registry images"
 msgstr ""
 
-msgid "container_name can contain only lowercase letters, digits, '-', and '.' and must start and end with an alphanumeric character"
-msgstr ""
-
-msgid "container_name cannot be larger than %{max_length} chars"
-msgstr ""
-
 msgid "contains URLs that exceed the 1024 character limit (%{urls})"
 msgstr ""
 
@@ -46270,12 +46207,6 @@ msgstr ""
 msgid "pipelineEditorWalkthrough|You can use the file tree to view your pipeline configuration files. %{linkStart}Learn more%{linkEnd}"
 msgstr ""
 
-msgid "pod_name can contain only lowercase letters, digits, '-', and '.' and must start and end with an alphanumeric character"
-msgstr ""
-
-msgid "pod_name cannot be larger than %{max_length} chars"
-msgstr ""
-
 msgid "point"
 msgid_plural "points"
 msgstr[0] ""