diff --git a/Gemfile b/Gemfile index 6737bc3012b93c2ae714de696d14a946012bee6a..40aca12aea8ca3129b636c1cd9935f51d37058f6 100644 --- a/Gemfile +++ b/Gemfile @@ -419,7 +419,8 @@ end gem 'octokit', '~> 4.15' -gem 'mail_room', '~> 0.10.0' +# https://gitlab.com/gitlab-org/gitlab/issues/207207 +gem 'gitlab-mail_room', '~> 0.0.2', require: 'mail_room' gem 'email_reply_trimmer', '~> 0.1' gem 'html2text' diff --git a/Gemfile.lock b/Gemfile.lock index f4a0ae4ebefd50a066015c8e4c11bf9a6981eb28..38de70b41ef602577c1d81895df14b607e7a9c75 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -388,6 +388,7 @@ GEM opentracing (~> 0.4) redis (> 3.0.0, < 5.0.0) gitlab-license (1.0.0) + gitlab-mail_room (0.0.2) gitlab-markup (1.7.0) gitlab-net-dns (0.9.1) gitlab-puma (4.3.1.gitlab.2) @@ -616,7 +617,6 @@ GEM lumberjack (1.0.13) mail (2.7.1) mini_mime (>= 0.1.1) - mail_room (0.10.0) marcel (0.3.3) mimemagic (~> 0.3.2) marginalia (1.8.0) @@ -1235,6 +1235,7 @@ DEPENDENCIES gitlab-chronic (~> 0.10.5) gitlab-labkit (= 0.10.0) gitlab-license (~> 1.0) + gitlab-mail_room (~> 0.0.2) gitlab-markup (~> 1.7.0) gitlab-net-dns (~> 0.9.1) gitlab-puma (~> 4.3.1.gitlab.2) @@ -1284,7 +1285,6 @@ DEPENDENCIES loofah (~> 2.2) lru_redux mail (= 2.7.1) - mail_room (~> 0.10.0) marginalia (~> 1.8.0) memory_profiler (~> 0.9) method_source (~> 0.8) diff --git a/app/assets/javascripts/diffs/components/diff_table_cell.vue b/app/assets/javascripts/diffs/components/diff_table_cell.vue index 9544fbe9fc574876b1277abc93f4a4d20d75841c..e0fb1226674d16c3a7421afa1041be064379070c 100644 --- a/app/assets/javascripts/diffs/components/diff_table_cell.vue +++ b/app/assets/javascripts/diffs/components/diff_table_cell.vue @@ -166,6 +166,7 @@ export default { :href="lineHref" @click="setHighlightedRow(lineCode)" > + {{ lineNumber }} </a> <diff-gutter-avatars v-if="shouldShowAvatarsOnGutter" diff --git a/app/assets/javascripts/notes/stores/getters.js b/app/assets/javascripts/notes/stores/getters.js index 
4f8ff8240b24b6df8f4cb512b645980ecc455e12..3a1e795cff40481ac6cfb7a71d15c103ffd0fa60 100644 --- a/app/assets/javascripts/notes/stores/getters.js +++ b/app/assets/javascripts/notes/stores/getters.js @@ -28,6 +28,8 @@ export const getUserData = state => state.userData || {}; export const getUserDataByProp = state => prop => state.userData && state.userData[prop]; +export const descriptionVersion = state => state.descriptionVersion; + export const notesById = state => state.discussions.reduce((acc, note) => { note.notes.every(n => Object.assign(acc, { [n.id]: n })); diff --git a/app/assets/javascripts/vue_shared/components/notes/system_note.vue b/app/assets/javascripts/vue_shared/components/notes/system_note.vue index 0c4d75fb0ad4c48572e4d8c4cf2fe4005a134068..908f7196abf5f4429e598d5259a0e2802d93bdff 100644 --- a/app/assets/javascripts/vue_shared/components/notes/system_note.vue +++ b/app/assets/javascripts/vue_shared/components/notes/system_note.vue @@ -54,8 +54,8 @@ export default { }; }, computed: { - ...mapGetters(['targetNoteHash']), - ...mapState(['descriptionVersion', 'isLoadingDescriptionVersion']), + ...mapGetters(['targetNoteHash', 'descriptionVersion']), + ...mapState(['isLoadingDescriptionVersion']), noteAnchorId() { return `note_${this.note.id}`; }, diff --git a/app/assets/stylesheets/pages/diff.scss b/app/assets/stylesheets/pages/diff.scss index 24c6fec064a8b41145553eb18d1d2ffadd544d37..e83450f2dbc17265e0abc9735b043706bf5b9e27 100644 --- a/app/assets/stylesheets/pages/diff.scss +++ b/app/assets/stylesheets/pages/diff.scss @@ -485,10 +485,6 @@ table.code { } } } - - &:not(.js-unfold-bottom) a::before { - content: attr(data-linenumber); - } } &.line_content { diff --git a/app/models/concerns/cache_markdown_field.rb b/app/models/concerns/cache_markdown_field.rb index 9713e79f525b6ad255b880e4c0e10e807ffd1c0d..cc13f279c4d68f193d64e909a86845749f83de76 100644 --- a/app/models/concerns/cache_markdown_field.rb +++ b/app/models/concerns/cache_markdown_field.rb @@ 
-20,6 +20,10 @@ def skip_project_check? false end + def can_cache_field?(field) + true + end + # Returns the default Banzai render context for the cached markdown field. def banzai_render_context(field) raise ArgumentError.new("Unknown field: #{field.inspect}") unless @@ -38,17 +42,23 @@ def banzai_render_context(field) context end - # Update every column in a row if any one is invalidated, as we only store - # one version per row - def refresh_markdown_cache + def rendered_field_content(markdown_field) + return unless can_cache_field?(markdown_field) + options = { skip_project_check: skip_project_check? } + Banzai::Renderer.cacheless_render_field(self, markdown_field, options) + end + # Update every applicable column in a row if any one is invalidated, as we only store + # one version per row + def refresh_markdown_cache updates = cached_markdown_fields.markdown_fields.map do |markdown_field| [ cached_markdown_fields.html_field(markdown_field), - Banzai::Renderer.cacheless_render_field(self, markdown_field, options) + rendered_field_content(markdown_field) ] end.to_h + updates['cached_markdown_version'] = latest_cached_markdown_version updates.each { |field, data| write_markdown_field(field, data) } diff --git a/app/models/group.rb b/app/models/group.rb index a5337f19b38adb41fe342cecdb4e1ede6f71f605..d6a4af5af15add61433b457767215bcf2ff4db33 100644 --- a/app/models/group.rb +++ b/app/models/group.rb @@ -406,11 +406,15 @@ def mattermost_team_params end def ci_variables_for(ref, project) - list_of_ids = [self] + ancestors - variables = Ci::GroupVariable.where(group: list_of_ids) - variables = variables.unprotected unless project.protected_for?(ref) - variables = variables.group_by(&:group_id) - list_of_ids.reverse.flat_map { |group| variables[group.id] }.compact + cache_key = "ci_variables_for:group:#{self&.id}:project:#{project&.id}:ref:#{ref}" + + ::Gitlab::SafeRequestStore.fetch(cache_key) do + list_of_ids = [self] + ancestors + variables = 
Ci::GroupVariable.where(group: list_of_ids) + variables = variables.unprotected unless project.protected_for?(ref) + variables = variables.group_by(&:group_id) + list_of_ids.reverse.flat_map { |group| variables[group.id] }.compact + end end def group_member(user) diff --git a/app/models/project.rb b/app/models/project.rb index 5ec43de21feeb24b4d34e64ed3dc028a9be9d555..f72e777c00419f09577a1a82006bb71abdc35a42 100644 --- a/app/models/project.rb +++ b/app/models/project.rb @@ -1963,6 +1963,14 @@ def default_environment end def ci_variables_for(ref:, environment: nil) + cache_key = "ci_variables_for:project:#{self&.id}:ref:#{ref}:environment:#{environment}" + + ::Gitlab::SafeRequestStore.fetch(cache_key) do + uncached_ci_variables_for(ref: ref, environment: environment) + end + end + + def uncached_ci_variables_for(ref:, environment: nil) result = if protected_for?(ref) variables else diff --git a/app/models/concerns/resource_event_tools.rb b/app/models/resource_event.rb similarity index 56% rename from app/models/concerns/resource_event_tools.rb rename to app/models/resource_event.rb index 7226b9573e12d352f15f7d3307b9e974d30f6da5..9b3a211ad43120f895ccbf0b046a6445f52762e6 100644 --- a/app/models/concerns/resource_event_tools.rb +++ b/app/models/resource_event.rb @@ -1,16 +1,27 @@ # frozen_string_literal: true -module ResourceEventTools - extend ActiveSupport::Concern +class ResourceEvent < ApplicationRecord + include Gitlab::Utils::StrongMemoize + include Importable - included do - belongs_to :user + self.abstract_class = true - validates :user, presence: { unless: :importing? }, on: :create + validates :user, presence: { unless: :importing? 
}, on: :create - validate :exactly_one_issuable + belongs_to :user - scope :created_after, ->(time) { where('created_at > ?', time) } + scope :created_after, ->(time) { where('created_at > ?', time) } + + def discussion_id + strong_memoize(:discussion_id) do + Digest::SHA1.hexdigest(discussion_id_key.join("-")) + end + end + + private + + def discussion_id_key + [self.class.name, created_at, user_id] end def exactly_one_issuable diff --git a/app/models/resource_label_event.rb b/app/models/resource_label_event.rb index 59907f1b9628f263a2eb9a9a3b9c7a2f09272eb8..970d4e1e56261fbffc0d9dec0976b0b86a18019e 100644 --- a/app/models/resource_label_event.rb +++ b/app/models/resource_label_event.rb @@ -1,10 +1,7 @@ # frozen_string_literal: true -class ResourceLabelEvent < ApplicationRecord - include Importable - include Gitlab::Utils::StrongMemoize +class ResourceLabelEvent < ResourceEvent include CacheMarkdownField - include ResourceEventTools cache_markdown_field :reference @@ -13,8 +10,11 @@ class ResourceLabelEvent < ApplicationRecord belongs_to :label scope :inc_relations, -> { includes(:label, :user) } + scope :by_issue, ->(issue) { where(issue_id: issue.id) } + scope :by_merge_request, ->(merge_request) { where(merge_request_id: merge_request.id) } validates :label, presence: { unless: :importing? }, on: :create + validate :exactly_one_issuable after_save :expire_etag_cache after_destroy :expire_etag_cache @@ -41,12 +41,6 @@ def issuable issue || merge_request end - def discussion_id(resource = nil) - strong_memoize(:discussion_id) do - Digest::SHA1.hexdigest(discussion_id_key.join("-")) - end - end - def project issuable.project end @@ -109,10 +103,6 @@ def local_label? 
def resource_parent issuable.project || issuable.group end - - def discussion_id_key - [self.class.name, created_at, user_id] - end end ResourceLabelEvent.prepend_if_ee('EE::ResourceLabelEvent') diff --git a/app/models/resource_milestone_event.rb b/app/models/resource_milestone_event.rb index ba43a1ee3637af3658c217cfc210f56ffb8700c7..d362ebc307adf74e1fb9a738af5332264922599c 100644 --- a/app/models/resource_milestone_event.rb +++ b/app/models/resource_milestone_event.rb @@ -1,10 +1,6 @@ # frozen_string_literal: true -class ResourceMilestoneEvent < ApplicationRecord - include Gitlab::Utils::StrongMemoize - include Importable - include ResourceEventTools - +class ResourceMilestoneEvent < ResourceEvent belongs_to :issue belongs_to :merge_request belongs_to :milestone @@ -12,6 +8,8 @@ class ResourceMilestoneEvent < ApplicationRecord scope :by_issue, ->(issue) { where(issue_id: issue.id) } scope :by_merge_request, ->(merge_request) { where(merge_request_id: merge_request.id) } + validate :exactly_one_issuable + enum action: { add: 1, remove: 2 @@ -23,8 +21,4 @@ class ResourceMilestoneEvent < ApplicationRecord def self.issuable_attrs %i(issue merge_request).freeze end - - def resource - issue || merge_request - end end diff --git a/app/models/resource_weight_event.rb b/app/models/resource_weight_event.rb index ab288798aedc5b8cfee0c121fab838cfe7297785..e0cc0c87a830bd25c514eb3842d8d0be795705ec 100644 --- a/app/models/resource_weight_event.rb +++ b/app/models/resource_weight_event.rb @@ -1,26 +1,9 @@ # frozen_string_literal: true -class ResourceWeightEvent < ApplicationRecord - include Gitlab::Utils::StrongMemoize - - validates :user, presence: true +class ResourceWeightEvent < ResourceEvent validates :issue, presence: true - belongs_to :user belongs_to :issue scope :by_issue, ->(issue) { where(issue_id: issue.id) } - scope :created_after, ->(time) { where('created_at > ?', time) } - - def discussion_id(resource = nil) - strong_memoize(:discussion_id) do - 
Digest::SHA1.hexdigest(discussion_id_key.join("-")) - end - end - - private - - def discussion_id_key - [self.class.name, created_at, user_id] - end end diff --git a/app/models/snippet.rb b/app/models/snippet.rb index 8bba79bd944cb01b5412ea2fc520497e52680293..233834dbaf97d42844262578a117b68dacaf906e 100644 --- a/app/models/snippet.rb +++ b/app/models/snippet.rb @@ -301,6 +301,10 @@ def track_snippet_repository repository.update!(shard_name: repository_storage, disk_path: disk_path) end + def can_cache_field?(field) + field != :content || MarkupHelper.gitlab_markdown?(file_name) + end + class << self # Searches for snippets with a matching title or file name. # diff --git a/app/models/user_bot_type_enums.rb b/app/models/user_bot_type_enums.rb index b6b08ce650b05e86f8644e76253b1e715d80da42..e4b1751b07247b700f6ea96327b7017f33d52249 100644 --- a/app/models/user_bot_type_enums.rb +++ b/app/models/user_bot_type_enums.rb @@ -2,7 +2,7 @@ module UserBotTypeEnums def self.bots - # When adding a new key, please ensure you are not conflicting with EE-only keys in app/models/user_bot_types_enums.rb + # When adding a new key, please ensure you are not conflicting with EE-only keys in app/models/user_bot_type_enums.rb { alert_bot: 2 } diff --git a/bin/mail_room b/bin/mail_room index 74a84f5b2b477d64441989633a3b68963a4d1db5..2539e3d388e3dfc93727338ad17f91147983cdec 100755 --- a/bin/mail_room +++ b/bin/mail_room @@ -19,7 +19,7 @@ get_mail_room_pid() start() { - bin/daemon_with_pidfile $mail_room_pidfile bundle exec mail_room -q -c $mail_room_config >> $mail_room_logfile 2>&1 + bin/daemon_with_pidfile $mail_room_pidfile bundle exec mail_room --log-exit-as json -q -c $mail_room_config >> $mail_room_logfile 2>&1 } stop() diff --git a/changelogs/unreleased/201771.yml b/changelogs/unreleased/201771.yml new file mode 100644 index 0000000000000000000000000000000000000000..8677b3d853c3ffe00907ccad5de53525d8434f39 --- /dev/null +++ b/changelogs/unreleased/201771.yml @@ -0,0 +1,5 @@ +--- 
+title: Replace content_viewer_spec setTimeouts with semantic actions / events +merge_request: +author: Oregand +type: other diff --git a/changelogs/unreleased/202639.yml b/changelogs/unreleased/202639.yml new file mode 100644 index 0000000000000000000000000000000000000000..6fa70055e96be95d29f29f6a58de698d2fdc6c04 --- /dev/null +++ b/changelogs/unreleased/202639.yml @@ -0,0 +1,5 @@ +--- +title: Replace line diff number css selector with actual HTML inside MRs +merge_request: +author: Oregand +type: other diff --git a/changelogs/unreleased/207126-more-descriptive-error-messages-in-migration-helpers.yml b/changelogs/unreleased/207126-more-descriptive-error-messages-in-migration-helpers.yml new file mode 100644 index 0000000000000000000000000000000000000000..36ed216a5057675c355abae5bf0324f260bab00c --- /dev/null +++ b/changelogs/unreleased/207126-more-descriptive-error-messages-in-migration-helpers.yml @@ -0,0 +1,5 @@ +--- +title: Improve error messages of failed migrations +merge_request: 25457 +author: +type: changed diff --git a/changelogs/unreleased/207976-stop-markdown-caching-of-non-markdown-snippet-content.yml b/changelogs/unreleased/207976-stop-markdown-caching-of-non-markdown-snippet-content.yml new file mode 100644 index 0000000000000000000000000000000000000000..dd249140092db23aeb387f10c8bd7332e4c55ebc --- /dev/null +++ b/changelogs/unreleased/207976-stop-markdown-caching-of-non-markdown-snippet-content.yml @@ -0,0 +1,5 @@ +--- +title: Fix Snippet content incorrectly caching +merge_request: 25985 +author: +type: fixed diff --git a/changelogs/unreleased/208548-better-spec-test-for-error-tracking-web-ui.yml b/changelogs/unreleased/208548-better-spec-test-for-error-tracking-web-ui.yml new file mode 100644 index 0000000000000000000000000000000000000000..6b6d479e8157eca5e817b9f8d63d752740ee420a --- /dev/null +++ b/changelogs/unreleased/208548-better-spec-test-for-error-tracking-web-ui.yml @@ -0,0 +1,5 @@ +--- +title: Fix fixtures for Error Tracking Web UI 
+merge_request: 26233 +author: Takuya Noguchi +type: other diff --git a/changelogs/unreleased/34086-es-bulk-incremental-index-updates.yml b/changelogs/unreleased/34086-es-bulk-incremental-index-updates.yml new file mode 100644 index 0000000000000000000000000000000000000000..67cceb21af0c129e71f1d4e580f20864aee09554 --- /dev/null +++ b/changelogs/unreleased/34086-es-bulk-incremental-index-updates.yml @@ -0,0 +1,5 @@ +--- +title: 'Add a bulk processor for elasticsearch incremental updates' +merge_request: 24298 +author: +type: added diff --git a/changelogs/unreleased/fix-dependency-proxy-link.yml b/changelogs/unreleased/fix-dependency-proxy-link.yml new file mode 100644 index 0000000000000000000000000000000000000000..547d0334d1e29a4ce1b22ab69e2b0d4bd601c8c8 --- /dev/null +++ b/changelogs/unreleased/fix-dependency-proxy-link.yml @@ -0,0 +1,5 @@ +--- +title: Add link to dependency proxy docs on the dependency proxy page +merge_request: 26092 +author: +type: changed diff --git a/changelogs/unreleased/georgekoltsov-fix-epic-issues.yml b/changelogs/unreleased/georgekoltsov-fix-epic-issues.yml new file mode 100644 index 0000000000000000000000000000000000000000..7a856dc6e28513c63a5abde5bfe82d8f743f0e27 --- /dev/null +++ b/changelogs/unreleased/georgekoltsov-fix-epic-issues.yml @@ -0,0 +1,5 @@ +--- +title: Fix issues missing on epic's page after project import +merge_request: 26099 +author: +type: fixed diff --git a/changelogs/unreleased/kassio-fix-dev-seed.yml b/changelogs/unreleased/kassio-fix-dev-seed.yml new file mode 100644 index 0000000000000000000000000000000000000000..e55193184ddf6b24d505efb2007cda8b1a20df7d --- /dev/null +++ b/changelogs/unreleased/kassio-fix-dev-seed.yml @@ -0,0 +1,5 @@ +--- +title: Fix dev vulnerabilities seeder +merge_request: 26169 +author: +type: fixed diff --git a/changelogs/unreleased/mk-hide-secondary-only-setting.yml b/changelogs/unreleased/mk-hide-secondary-only-setting.yml new file mode 100644 index 
0000000000000000000000000000000000000000..49107c23b16464217ef182040cf3c24801f1fbbf --- /dev/null +++ b/changelogs/unreleased/mk-hide-secondary-only-setting.yml @@ -0,0 +1,5 @@ +--- +title: 'Geo: Show secondary-only setting only on secondaries' +merge_request: 26029 +author: +type: fixed diff --git a/changelogs/unreleased/sh-cache-ci-variables.yml b/changelogs/unreleased/sh-cache-ci-variables.yml new file mode 100644 index 0000000000000000000000000000000000000000..8534af3780876901a3bbbf245a544af2242701e9 --- /dev/null +++ b/changelogs/unreleased/sh-cache-ci-variables.yml @@ -0,0 +1,5 @@ +--- +title: Memoize loading of CI variables +merge_request: 26147 +author: +type: performance diff --git a/changelogs/unreleased/sh-disable-line-in-marginalia.yml b/changelogs/unreleased/sh-disable-line-in-marginalia.yml new file mode 100644 index 0000000000000000000000000000000000000000..51be4db1965c8340b9f858193fc5844ecb5ba760 --- /dev/null +++ b/changelogs/unreleased/sh-disable-line-in-marginalia.yml @@ -0,0 +1,5 @@ +--- +title: Disable Marginalia line backtrace in production +merge_request: 26199 +author: +type: performance diff --git a/config/gitlab.yml.example b/config/gitlab.yml.example index 20c75a6e255f0ce5622d6a1cde06bb2562803187..330e5109ed416017ab82d003adc49518917fffcb 100644 --- a/config/gitlab.yml.example +++ b/config/gitlab.yml.example @@ -454,6 +454,11 @@ production: &base pseudonymizer_worker: cron: "0 * * * *" + # Elasticsearch bulk updater for incremental updates. + # NOTE: This will only take effect if elasticsearch is enabled. 
+ elastic_index_bulk_cron_worker: + cron: "*/1 * * * *" + registry: # enabled: true # host: registry.example.com diff --git a/config/initializers/0_marginalia.rb b/config/initializers/0_marginalia.rb index f88a90854e3d11871d46ea7f8a582a2e798de669..a697f67dbf22eb0949ccf7560818823ff0fa4b6f 100644 --- a/config/initializers/0_marginalia.rb +++ b/config/initializers/0_marginalia.rb @@ -9,7 +9,13 @@ # Refer: https://github.com/basecamp/marginalia/blob/v1.8.0/lib/marginalia/railtie.rb#L67 ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.prepend(Gitlab::Marginalia::ActiveRecordInstrumentation) -Marginalia::Comment.components = [:application, :controller, :action, :correlation_id, :jid, :job_class, :line] +Marginalia::Comment.components = [:application, :controller, :action, :correlation_id, :jid, :job_class] + +# As mentioned in https://github.com/basecamp/marginalia/pull/93/files, +# adding :line has some overhead because a regexp on the backtrace has +# to be run on every SQL query. Only enable this in development because +# we've seen it slow things down. +Marginalia::Comment.components << :line if Rails.env.development? 
Gitlab::Marginalia.set_application_name diff --git a/config/initializers/1_settings.rb b/config/initializers/1_settings.rb index 156cf78dfc42f77967ae033ff7c99e3ebb5ac1db..684ccb73603e137905debd1d7fe2d037816e7f0b 100644 --- a/config/initializers/1_settings.rb +++ b/config/initializers/1_settings.rb @@ -537,6 +537,9 @@ Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker'] ||= Settingslogic.new({}) Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker']['cron'] ||= '0 12 * * *' Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker']['job_class'] = 'UpdateMaxSeatsUsedForGitlabComSubscriptionsWorker' + Settings.cron_jobs['elastic_index_bulk_cron_worker'] ||= Settingslogic.new({}) + Settings.cron_jobs['elastic_index_bulk_cron_worker']['cron'] ||= '*/1 * * * *' + Settings.cron_jobs['elastic_index_bulk_cron_worker']['job_class'] ||= 'ElasticIndexBulkCronWorker' end # diff --git a/config/routes.rb b/config/routes.rb index 16b15e5300a7c9bdda2b95831c2861e5e3bfd25d..cf39ca3384d25544dde2ae747b666b919a1e9b76 100644 --- a/config/routes.rb +++ b/config/routes.rb @@ -121,10 +121,7 @@ draw :country draw :country_state draw :subscription - - constraints(-> (*) { Gitlab::Analytics.any_features_enabled? }) do - draw :analytics - end + draw :analytics end if ENV['GITLAB_CHAOS_SECRET'] || Rails.env.development? || Rails.env.test? 
diff --git a/doc/administration/auth/oidc.md b/doc/administration/auth/oidc.md index 6f59cffc3ccae90cdf6a8dfe5e877b83f5daa69d..0160db1b3886c55724f3832b955c4db872ab3f5e 100644 --- a/doc/administration/auth/oidc.md +++ b/doc/administration/auth/oidc.md @@ -42,6 +42,7 @@ The OpenID Connect will provide you with a client details and secret for you to 'discovery' => true, 'client_auth_method' => 'query', 'uid_field' => '<uid_field>', + 'send_scope_to_token_endpoint' => 'false', 'client_options' => { 'identifier' => '<your_oidc_client_id>', 'secret' => '<your_oidc_client_secret>', @@ -65,6 +66,7 @@ The OpenID Connect will provide you with a client details and secret for you to discovery: true, client_auth_method: 'query', uid_field: '<uid_field>', + send_scope_to_token_endpoint: false, client_options: { identifier: '<your_oidc_client_id>', secret: '<your_oidc_client_secret>', @@ -92,6 +94,8 @@ The OpenID Connect will provide you with a client details and secret for you to - If not specified, defaults to `basic`. - `<uid_field>` (optional) is the field name from the `user_info` details that will be used as `uid` value. For example, `preferred_username`. If this value is not provided or the field with the configured value is missing from the `user_info` details, the `uid` will use the `sub` field. + - `send_scope_to_token_endpoint` is `true` by default. In other words, the `scope` parameter is normally included in requests to the token endpoint. + However, if your OpenID Connect provider does not accept the `scope` parameter in such requests, set this to `false`. - `client_options` are the OpenID Connect client-specific options. Specifically: - `identifier` is the client identifier as configured in the OpenID Connect service provider. - `secret` is the client secret as configured in the OpenID Connect service provider. 
diff --git a/doc/administration/auth/okta.md b/doc/administration/auth/okta.md index 7b5effe3d778a14b27e4fad41d767e6b9dc1a846..c83645ffe740aa3338030eeacc517215f81912b0 100644 --- a/doc/administration/auth/okta.md +++ b/doc/administration/auth/okta.md @@ -42,21 +42,6 @@ Now that the Okta app is configured, it's time to enable it in GitLab. ## Configure GitLab -1. On your GitLab server, open the configuration file: - - **For Omnibus GitLab installations** - - ```shell - sudo editor /etc/gitlab/gitlab.rb - ``` - - **For installations from source** - - ```shell - cd /home/git/gitlab - sudo -u git -H editor config/gitlab.yml - ``` - 1. See [Initial OmniAuth Configuration](../../integration/omniauth.md#initial-omniauth-configuration) for initial settings. @@ -66,13 +51,19 @@ Now that the Okta app is configured, it's time to enable it in GitLab. **For Omnibus GitLab installations** + Edit `/etc/gitlab/gitlab.rb`: + ```ruby gitlab_rails['omniauth_allow_single_sign_on'] = ['saml'] gitlab_rails['omniauth_block_auto_created_users'] = false ``` + --- + **For installations from source** + Edit `config/gitlab.yml`: + ```yaml allow_single_sign_on: ["saml"] block_auto_created_users: false @@ -83,15 +74,21 @@ Now that the Okta app is configured, it's time to enable it in GitLab. **For Omnibus GitLab installations** + Edit `/etc/gitlab/gitlab.rb`: + ```ruby gitlab_rails['omniauth_auto_link_saml_user'] = true ``` + --- + **For installations from source** - ```yaml - auto_link_saml_user: true - ``` + Edit `config/gitlab.yml`: + + ```yaml + auto_link_saml_user: true + ``` 1. Add the provider configuration. diff --git a/doc/administration/gitaly/index.md b/doc/administration/gitaly/index.md index 390e0ae05af2555f674b2ed49d1e1bea42fe29b7..92a44271775461e17dfb192f70db158a33e9a74b 100644 --- a/doc/administration/gitaly/index.md +++ b/doc/administration/gitaly/index.md @@ -6,9 +6,9 @@ components can read or write Git data. 
GitLab components that access Git repositories (GitLab Rails, GitLab Shell, GitLab Workhorse, etc.) act as clients to Gitaly. End users do not have direct access to Gitaly. -In the rest of this page, Gitaly server is referred to the standalone node that -only runs Gitaly, and Gitaly client to the GitLab Rails node that runs all other -processes except Gitaly. +On this page, *Gitaly server* refers to a standalone node that only runs Gitaly +and *Gitaly client* is a GitLab Rails app node that runs all other processes +except Gitaly. ## Architecture @@ -20,7 +20,7 @@ Here's a high-level architecture overview of how Gitaly is used. The Gitaly service itself is configured via a [TOML configuration file](reference.md). -In case you want to change some of its settings: +If you want to change any of its settings: **For Omnibus GitLab** @@ -54,10 +54,6 @@ scenario, the [new repository indexer](../../integration/elasticsearch.md#elasti needs to be enabled in your GitLab configuration. [Since GitLab v12.3](https://gitlab.com/gitlab-org/gitlab/issues/6481), the new indexer becomes the default and no configuration is required. -NOTE: **Note:** While Gitaly can be used as a replacement for NFS, it's not recommended -to use EFS as it may impact GitLab's performance. Review the [relevant documentation](../high_availability/nfs.md#avoid-using-awss-elastic-file-system-efs) -for more details. - ### Network architecture The following list depicts what the network architecture of Gitaly is: @@ -568,30 +564,6 @@ server with the following settings. 1. Save the file and [restart GitLab](../restart_gitlab.md#installations-from-source). -## Eliminating NFS altogether - -If you are planning to use Gitaly without NFS for your storage needs -and want to eliminate NFS from your environment altogether, there are -a few things that you need to do: - -1. 
Make sure the [`git` user home directory](https://docs.gitlab.com/omnibus/settings/configuration.html#moving-the-home-directory-for-a-user) is on local disk. -1. Configure [database lookup of SSH keys](../operations/fast_ssh_key_lookup.md) - to eliminate the need for a shared `authorized_keys` file. -1. Configure [object storage for job artifacts](../job_artifacts.md#using-object-storage) - including [incremental logging](../job_logs.md#new-incremental-logging-architecture). -1. Configure [object storage for LFS objects](../lfs/lfs_administration.md#storing-lfs-objects-in-remote-object-storage). -1. Configure [object storage for uploads](../uploads.md#using-object-storage-core-only). -1. Configure [object storage for merge request diffs](../merge_request_diffs.md#using-object-storage). -1. Configure [object storage for packages](../packages/index.md#using-object-storage) (optional feature). -1. Configure [object storage for dependency proxy](../packages/dependency_proxy.md#using-object-storage) (optional feature). -1. Configure [object storage for Mattermost](https://docs.mattermost.com/administration/config-settings.html#file-storage) (optional feature). - -NOTE: **Note:** -One current feature of GitLab that still requires a shared directory (NFS) is -[GitLab Pages](../../user/project/pages/index.md). -There is [work in progress](https://gitlab.com/gitlab-org/gitlab-pages/issues/196) -to eliminate the need for NFS to support GitLab Pages. 
- ## Limiting RPC concurrency It can happen that CI clone traffic puts a large strain on your Gitaly diff --git a/doc/administration/high_availability/README.md b/doc/administration/high_availability/README.md index 2c2fc075dbe7bdcc8c841f9ed94ff060d868d7d8..ea7ffb7aa51bc28606b4f31fb3875642007c52aa 100644 --- a/doc/administration/high_availability/README.md +++ b/doc/administration/high_availability/README.md @@ -4,210 +4,56 @@ type: reference, concepts # Scaling and High Availability -GitLab supports a number of options for scaling your self-managed instance and configuring high availability (HA). -The solution you choose will be based on the level of scalability and -availability you require. The easiest solutions are scalable, but not necessarily -highly available. - -GitLab provides a service that is essential to most organizations: it -enables people to collaborate on code in a timely fashion. Any downtime should -therefore be short and planned. Due to the distributed nature -of Git, developers can continue to commit code locally even when GitLab is not -available. However, some GitLab features such as the issue tracker and -continuous integration are not available when GitLab is down. -If you require all GitLab functionality to be highly available, -consider the options outlined below. - -**Keep in mind that all highly-available solutions come with a trade-off between -cost/complexity and uptime**. The more uptime you want, the more complex the -solution. And the more complex the solution, the more work is involved in -setting up and maintaining it. High availability is not free and every HA -solution should balance the costs against the benefits. - -There are many options when choosing a highly-available GitLab architecture. We -recommend engaging with GitLab Support to choose the best architecture for your -use case. This page contains recommendations based on -experience with GitLab.com and internal scale testing. 
+GitLab supports a number of options for larger self-managed instances to +ensure that they are scalable and highly available. While these needs can be tackled +individually, they typically go hand in hand: a performant scalable environment +will have availability by default, as its components are separated and pooled. + +On this page, we present recommendations for setups based on the number +of users you expect. For larger setups we give several recommended +architectures based on experience with GitLab.com and internal scale +testing that aim to achieve the right balance between both scalability +and availability. For detailed insight into how GitLab scales and configures GitLab.com, you can watch [this 1 hour Q&A](https://www.youtube.com/watch?v=uCU8jdYzpac) -with [John Northrup](https://gitlab.com/northrup), and live questions coming in from some of our customers. - -## GitLab Components - -The following components need to be considered for a scaled or highly-available -environment. In many cases, components can be combined on the same nodes to reduce -complexity. - -- GitLab application nodes (Unicorn / Puma, Workhorse) - Web-requests (UI, API, Git over HTTP) -- Sidekiq - Asynchronous/Background jobs -- PostgreSQL - Database - - Consul - Database service discovery and health checks/failover - - PgBouncer - Database pool manager -- Redis - Key/Value store (User sessions, cache, queue for Sidekiq) - - Sentinel - Redis health check/failover manager -- Gitaly - Provides high-level storage and RPC access to Git repositories -- S3 Object Storage service[^4] and / or NFS storage servers[^5] for entities such as Uploads, Artifacts, LFS Objects, etc... -- Load Balancer[^6] - Main entry point and handles load balancing for the GitLab application nodes. -- Monitor - Prometheus and Grafana monitoring with auto discovery. - -## Scalable Architecture Examples - -When an organization reaches a certain threshold it will be necessary to scale -the GitLab instance. 
Still, true high availability may not be necessary. There -are options for scaling GitLab instances relatively easily without incurring the -infrastructure and maintenance costs of full high availability. - -### Basic Scaling - -This is the simplest form of scaling and will work for the majority of -cases. Backend components such as PostgreSQL, Redis, and storage are offloaded -to their own nodes while the remaining GitLab components all run on 2 or more -application nodes. - -This form of scaling also works well in a cloud environment when it is more -cost effective to deploy several small nodes rather than a single -larger one. - -- 1 PostgreSQL node -- 1 Redis node -- 1 Gitaly node -- 1 or more Object Storage services[^4] and / or NFS storage server[^5] -- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq) -- 1 or more Load Balancer nodes[^6] -- 1 Monitoring node (Prometheus, Grafana) - -#### Installation Instructions - -Complete the following installation steps in order. A link at the end of each -section will bring you back to the Scalable Architecture Examples section so -you can continue with the next step. - -1. [Load Balancer(s)](load_balancer.md)[^6] -1. [Consul](consul.md) -1. [PostgreSQL](database.md#postgresql-in-a-scaled-environment) with [PgBouncer](pgbouncer.md) -1. [Redis](redis.md#redis-in-a-scaled-environment) -1. [Gitaly](gitaly.md) (recommended) and / or [NFS](nfs.md)[^5] -1. [GitLab application nodes](gitlab.md) - - With [Object Storage service enabled](../gitaly/index.md#eliminating-nfs-altogether)[^4] -1. [Monitoring node (Prometheus and Grafana)](monitoring_node.md) - -### Full Scaling - -For very large installations, it might be necessary to further split components -for maximum scalability. In a fully-scaled architecture, the application node -is split into separate Sidekiq and Unicorn/Workhorse nodes. 
One indication that -this architecture is required is if Sidekiq queues begin to periodically increase -in size, indicating that there is contention or there are not enough resources. - -- 1 or more PostgreSQL nodes -- 1 or more Redis nodes -- 1 or more Gitaly storage servers -- 1 or more Object Storage services[^4] and / or NFS storage server[^5] -- 2 or more Sidekiq nodes -- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq) -- 1 or more Load Balancer nodes[^6] -- 1 Monitoring node (Prometheus, Grafana) - -## High Availability Architecture Examples - -When organizations require scaling *and* high availability, the following -architectures can be utilized. As the introduction section at the top of this -page mentions, there is a tradeoff between cost/complexity and uptime. Be sure -this complexity is absolutely required before taking the step into full -high availability. - -For all examples below, we recommend running Consul and Redis Sentinel separately -from the services they monitor. If Consul is running on PostgreSQL nodes or Sentinel on -Redis nodes, there is a potential that high resource usage by PostgreSQL or -Redis could prevent communication between the other Consul and Sentinel nodes. -This may lead to the other nodes believing a failure has occurred and initiating -automated failover. Isolating Consul and Redis Sentinel from the services they monitor -reduces the chances of a false positive that a failure has occurred. - -The examples below do not address high availability of NFS for objects. We recommend a -S3 Object Storage service[^4] is used where possible over NFS but it's still required in -certain cases[^5]. Where NFS is to be used some enterprises have access to NFS appliances -that manage availability and this would be best case scenario. - -There are many options in between each of these examples. Work with GitLab Support -to understand the best starting point for your workload and adapt from there. 
- -### Horizontal - -This is the simplest form of high availability and scaling. It requires the -fewest number of individual servers (virtual or physical) but does have some -trade-offs and limits. - -This architecture will work well for many GitLab customers. Larger customers -may begin to notice certain events cause contention/high load - for example, -cloning many large repositories with binary files, high API usage, a large -number of enqueued Sidekiq jobs, and so on. If this happens, you should consider -moving to a hybrid or fully distributed architecture depending on what is causing -the contention. - -- 3 PostgreSQL nodes -- 3 Redis nodes -- 3 Consul / Sentinel nodes -- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq) -- 1 Gitaly storage servers -- 1 Object Storage service[^4] and / or NFS storage server[^5] -- 1 or more Load Balancer nodes[^6] -- 1 Monitoring node (Prometheus, Grafana) - - - -### Hybrid - -In this architecture, certain components are split on dedicated nodes so high -resource usage of one component does not interfere with others. In larger -environments this is a good architecture to consider if you foresee or do have -contention due to certain workloads. - -- 3 PostgreSQL nodes -- 1 PgBouncer node -- 3 Redis nodes -- 3 Consul / Sentinel nodes -- 2 or more Sidekiq nodes -- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq) -- 1 Gitaly storage servers -- 1 Object Storage service[^4] and / or NFS storage server[^5] -- 1 or more Load Balancer nodes[^6] -- 1 Monitoring node (Prometheus, Grafana) - - - -### Fully Distributed - -This architecture scales to hundreds of thousands of users and projects and is -the basis of the GitLab.com architecture. While this scales well it also comes -with the added complexity of many more nodes to configure, manage, and monitor. 
- -- 3 PostgreSQL nodes -- 1 or more PgBouncer nodes (with associated internal load balancers) -- 4 or more Redis nodes (2 separate clusters for persistent and cache data) -- 3 Consul nodes -- 3 Sentinel nodes -- Multiple dedicated Sidekiq nodes (Split into real-time, best effort, ASAP, - CI Pipeline and Pull Mirror sets) -- 2 or more Git nodes (Git over SSH/Git over HTTP) -- 2 or more API nodes (All requests to `/api`) -- 2 or more Web nodes (All other web requests) -- 2 or more Gitaly storage servers -- 1 or more Object Storage services[^4] and / or NFS storage servers[^5] -- 1 or more Load Balancer nodes[^6] -- 1 Monitoring node (Prometheus, Grafana) - - - -## Reference Architecture Recommendations - -The Support and Quality teams build, performance test, and validate Reference -Architectures that support large numbers of users. The specifications below are -a representation of this work so far and may be adjusted in the future based on -additional testing and iteration. - -The architectures have been tested with specific coded workloads, and the +with [John Northrup](https://gitlab.com/northrup), and live questions coming +in from some of our customers. + +## Recommended Setups based on number of users + +- 1 - 1000 Users: A single-node [Omnibus](https://docs.gitlab.com/omnibus/) setup with frequent backups. Refer to the [requirements page](../../install/requirements.md) for further details of the specs you will require. +- 2000 - 50000+ Users: A scaled HA environment based on one of our [Reference Architectures](#reference-architectures) below. + +## GitLab Components and Configuration Instructions + +The GitLab application depends on the following [components](../../development/architecture.md#component-diagram) +and services. They are included in the reference architectures along with our +recommendations for their use and configuration. They are presented in the order +in which you would typically configure them. 
+ +| Component | Description | Configuration Instructions | +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------| +| [Load Balancer(s)](load_balancer.md)[^6] | Handles load balancing for the GitLab nodes where required. | [Load balancer HA configuration](load_balancer.md) | +| [Consul](../../development/architecture.md#consul)[^3] | Service discovery and health checks/failover | [Consul HA configuration](consul.md) | +| [PostgreSQL](../../development/architecture.md#postgresql) | Database | [Database HA configuration](database.md) | +| [PgBouncer](../../development/architecture.md#pgbouncer) | Database Pool Manager | [PgBouncer HA configuration](pgbouncer.md) | +| [Redis](../../development/architecture.md#redis)[^3] with Redis Sentinel | Key/Value store for shared data with HA watcher service | [Redis HA configuration](redis.md) | +| [Gitaly](../../development/architecture.md#gitaly)[^2] [^5] [^7] | Recommended high-level storage for Git repository data. | [Gitaly HA configuration](gitaly.md) | +| [Sidekiq](../../development/architecture.md#sidekiq) | Asynchronous/Background jobs | | +| [Cloud Object Storage service](object_storage.md)[^4] | Recommended store for shared data objects such as LFS, Uploads, Artifacts, etc... | [Cloud Object Storage configuration](object_storage.md) | +| [GitLab application nodes](../../development/architecture.md#unicorn)[^1] | (Unicorn / Puma, Workhorse) - Web-requests (UI, API, Git over HTTP) | [GitLab app HA/scaling configuration](gitlab.md) | +| [NFS](nfs.md)[^5] [^7] | Shared disk storage service. Can be used as an alternative for Gitaly or Object Storage. Required for GitLab Pages. 
| [NFS configuration](nfs.md) | +| [Prometheus](../../development/architecture.md#prometheus) and [Grafana](../../development/architecture.md#grafana) | GitLab environment monitoring | [Monitoring node for scaling/HA](monitoring_node.md) | + +In some cases, components can be combined on the same nodes to reduce complexity as well. + +## Reference Architectures + +In this section we'll detail the Reference Architectures that can support large numbers +of users. These were built, tested and verified by our Quality and Support teams. + +Testing was done with our GitLab Performance Tool at specific coded workloads, and the throughputs used for testing were calculated based on sample customer data. We test each endpoint type with the following number of requests per second (RPS) per 1000 users: @@ -235,11 +81,11 @@ On different cloud vendors a best effort like for like can be used. | GitLab Rails[^1] | 3 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | | PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | | PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | -| Gitaly[^2] [^7] | X | 4 vCPU, 15GB Memory | n1-standard-4 | +| Gitaly[^2] [^5] [^7] | X | 4 vCPU, 15GB Memory | n1-standard-4 | | Redis[^3] | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | | Consul + Sentinel[^3] | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | | Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | -| S3 Object Storage[^4] | - | - | - | +| Cloud Object Storage[^4] | - | - | - | | NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | | Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | | External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | @@ -257,11 +103,11 @@ On different cloud vendors a best effort like for like can be used. 
| GitLab Rails[^1] | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 | | PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | | PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | -| Gitaly[^2] [^7] | X | 8 vCPU, 30GB Memory | n1-standard-8 | +| Gitaly[^2] [^5] [^7] | X | 8 vCPU, 30GB Memory | n1-standard-8 | | Redis[^3] | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | | Consul + Sentinel[^3] | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | | Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | -| S3 Object Storage[^4] | - | - | - | +| Cloud Object Storage[^4] | - | - | - | | NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | | Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | | External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | @@ -279,14 +125,14 @@ On different cloud vendors a best effort like for like can be used. | GitLab Rails[^1] | 3 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | | PostgreSQL | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | | PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | -| Gitaly[^2] [^7] | X | 16 vCPU, 60GB Memory | n1-standard-16 | +| Gitaly[^2] [^5] [^7] | X | 16 vCPU, 60GB Memory | n1-standard-16 | | Redis[^3] - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | | Redis[^3] - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | | Redis Sentinel[^3] - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | | Redis Sentinel[^3] - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | | Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | | Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | -| S3 Object Storage[^4] | - | - | - | +| Cloud Object Storage[^4] | - | - | - | | NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | | Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | | External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | @@ -304,14 +150,14 @@ On different cloud vendors a best effort like for like can be used. 
| GitLab Rails[^1] | 7 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | | PostgreSQL | 3 | 8 vCPU, 30GB Memory | n1-standard-8 | | PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | -| Gitaly[^2] [^7] | X | 32 vCPU, 120GB Memory | n1-standard-32 | +| Gitaly[^2] [^5] [^7] | X | 32 vCPU, 120GB Memory | n1-standard-32 | | Redis[^3] - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | | Redis[^3] - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | | Redis Sentinel[^3] - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | | Redis Sentinel[^3] - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small | | Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | | Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | -| S3 Object Storage[^4] | - | - | - | +| Cloud Object Storage[^4] | - | - | - | | NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | | Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | | External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | @@ -329,7 +175,7 @@ On different cloud vendors a best effort like for like can be used. | GitLab Rails[^1] | 15 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 | | PostgreSQL | 3 | 16 vCPU, 60GB Memory | n1-standard-16 | | PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | -| Gitaly[^2] [^7] | X | 64 vCPU, 240GB Memory | n1-standard-64 | +| Gitaly[^2] [^5] [^7] | X | 64 vCPU, 240GB Memory | n1-standard-64 | | Redis[^3] - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | | Redis[^3] - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 | | Redis Sentinel[^3] - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small | @@ -337,7 +183,7 @@ On different cloud vendors a best effort like for like can be used. 
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | | Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 | | NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | -| S3 Object Storage[^4] | - | - | - | +| Cloud Object Storage[^4] | - | - | - | | Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | | External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | | Internal load balancing node[^6] | 1 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 | @@ -361,7 +207,7 @@ On different cloud vendors a best effort like for like can be used. and another for the Queues and Shared State classes respectively. We also recommend that you run the Redis Sentinel clusters separately as well for each Redis Cluster. -[^4]: For data objects such as LFS, Uploads, Artifacts, etc... We recommend a S3 Object Storage +[^4]: For data objects such as LFS, Uploads, Artifacts, etc... We recommend a Cloud Object Storage where possible over NFS due to better performance and availability. Several types of objects are supported for S3 storage - [Job artifacts](../job_artifacts.md#using-object-storage), [LFS](../lfs/lfs_administration.md#storing-lfs-objects-in-remote-object-storage), @@ -370,15 +216,15 @@ On different cloud vendors a best effort like for like can be used. [Packages](../packages/index.md#using-object-storage) (Optional Feature), [Dependency Proxy](../packages/dependency_proxy.md#using-object-storage) (Optional Feature). -[^5]: NFS storage server is still required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196) - and optionally for CI Job Incremental Logging - ([can be switched to use Redis instead](../job_logs.md#new-incremental-logging-architecture)). +[^5]: NFS can be used as an alternative for both repository data (replacing Gitaly) and + object storage but this isn't typically recommended for performance reasons. Note however it is required for + [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196). 
[^6]: Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/) as the load balancer. However other reputable load balancers with similar feature sets should also work instead but be aware these aren't validated. -[^7]: We strongly recommend that the Gitaly and / or NFS nodes are set up with SSD disks over +[^7]: We strongly recommend that any Gitaly and / or NFS nodes are set up with SSD disks over HDD with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS for write as these components have heavy I/O. These IOPS values are recommended only as a starter as with time they may be adjusted higher or lower depending on the scale of your diff --git a/doc/administration/high_availability/database.md b/doc/administration/high_availability/database.md index daeb0f9baf53b046a7499c5bbffa6687358e5519..596df656e2e79298f33553a66f22f778ad8846d5 100644 --- a/doc/administration/high_availability/database.md +++ b/doc/administration/high_availability/database.md @@ -22,11 +22,9 @@ If you use a cloud-managed service, or provide your own PostgreSQL: 1. Configure the GitLab application servers with the appropriate details. This step is covered in [Configuring GitLab for HA](gitlab.md). -## PostgreSQL in a Scaled Environment +## PostgreSQL in a Scaled and Highly Available Environment -This section is relevant for [Scaled Architecture](README.md#scalable-architecture-examples) -environments including [Basic Scaling](README.md#basic-scaling) and -[Full Scaling](README.md#full-scaling). +This section is relevant for [Scalable and Highly Available Setups](README.md). ### Provide your own PostgreSQL instance **(CORE ONLY)** @@ -94,23 +92,6 @@ deploy the bundled PostgreSQL. Advanced configuration options are supported and can be added if needed. 
-Continue configuration of other components by going -[back to Scaled Architectures](README.md#scalable-architecture-examples) - -## PostgreSQL with High Availability - -This section is relevant for [High Availability Architecture](README.md#high-availability-architecture-examples) -environments including [Horizontal](README.md#horizontal), -[Hybrid](README.md#hybrid), and -[Fully Distributed](README.md#fully-distributed). - -### Provide your own PostgreSQL instance **(CORE ONLY)** - -If you want to use your own deployed PostgreSQL instance(s), -see [Provide your own PostgreSQL instance](#provide-your-own-postgresql-instance-core-only) -for more details. However, you can use the GitLab Omnibus package to easily -deploy the bundled PostgreSQL. - ### High Availability with GitLab Omnibus **(PREMIUM ONLY)** > Important notes: diff --git a/doc/administration/high_availability/gitaly.md b/doc/administration/high_availability/gitaly.md index 739d1ae35fbd1d8b9f41a926d31ba7b668870972..bb40747b24cd87c9d4568fdc80237458cc2aa6d0 100644 --- a/doc/administration/high_availability/gitaly.md +++ b/doc/administration/high_availability/gitaly.md @@ -11,18 +11,15 @@ should consider using Gitaly on a separate node. See the [Gitaly HA Epic](https://gitlab.com/groups/gitlab-org/-/epics/289) to track plans and progress toward high availability support. -This document is relevant for [Scaled Architecture](README.md#scalable-architecture-examples) -environments and [High Availability Architecture](README.md#high-availability-architecture-examples). +This document is relevant for [Scalable and Highly Available Setups](README.md). ## Running Gitaly on its own server See [Running Gitaly on its own server](../gitaly/index.md#running-gitaly-on-its-own-server) in Gitaly documentation. 
-Continue configuration of other components by going back to: - -- [Scaled Architectures](README.md#scalable-architecture-examples) -- [High Availability Architectures](README.md#high-availability-architecture-examples) +Continue configuration of other components by going back to the +[Scaling and High Availability](README.md#gitlab-components-and-configuration-instructions) page. ## Enable Monitoring diff --git a/doc/administration/high_availability/object_storage.md b/doc/administration/high_availability/object_storage.md new file mode 100644 index 0000000000000000000000000000000000000000..6ec34ea2f5dc3b8e1e6827eab6bbd988a7380464 --- /dev/null +++ b/doc/administration/high_availability/object_storage.md @@ -0,0 +1,28 @@ +--- +type: reference +--- + +# Cloud Object Storage + +GitLab supports utilizing a Cloud Object Storage service over [NFS](nfs.md) for holding +numerous types of data. This is recommended in larger setups as object storage is +typically much more performant and reliable. + +For configuring GitLab to use Object Storage refer to the following guides: + +1. Make sure the [`git` user home directory](https://docs.gitlab.com/omnibus/settings/configuration.html#moving-the-home-directory-for-a-user) is on local disk. +1. Configure [database lookup of SSH keys](../operations/fast_ssh_key_lookup.md) + to eliminate the need for a shared `authorized_keys` file. +1. Configure [object storage for job artifacts](../job_artifacts.md#using-object-storage) + including [incremental logging](../job_logs.md#new-incremental-logging-architecture). +1. Configure [object storage for LFS objects](../lfs/lfs_administration.md#storing-lfs-objects-in-remote-object-storage). +1. Configure [object storage for uploads](../uploads.md#using-object-storage-core-only). +1. Configure [object storage for merge request diffs](../merge_request_diffs.md#using-object-storage). +1. Configure [object storage for packages](../packages/index.md#using-object-storage) (optional feature). +1. 
Configure [object storage for dependency proxy](../packages/dependency_proxy.md#using-object-storage) (optional feature). + +NOTE: **Note:** +One current feature of GitLab that still requires a shared directory (NFS) is +[GitLab Pages](../../user/project/pages/index.md). +There is [work in progress](https://gitlab.com/gitlab-org/gitlab-pages/issues/196) +to eliminate the need for NFS to support GitLab Pages. diff --git a/doc/administration/high_availability/redis.md b/doc/administration/high_availability/redis.md index 539d492632fda091984c8f34b514f92511217eff..79082fefdd943fe27c876a1beb3bef24255831a4 100644 --- a/doc/administration/high_availability/redis.md +++ b/doc/administration/high_availability/redis.md @@ -20,11 +20,9 @@ The following are the requirements for providing your own Redis instance: Note the Redis node's IP address or hostname, port, and password (if required). These will be necessary when configuring the GitLab application servers later. -## Redis in a Scaled Environment +## Redis in a Scaled and Highly Available Environment -This section is relevant for [Scaled Architecture](README.md#scalable-architecture-examples) -environments including [Basic Scaling](README.md#basic-scaling) and -[Full Scaling](README.md#full-scaling). +This section is relevant for [Scalable and Highly Available Setups](README.md). ### Provide your own Redis instance **(CORE ONLY)** @@ -85,22 +83,8 @@ Omnibus: Advanced configuration options are supported and can be added if needed. -Continue configuration of other components by going -[back to Scaled Architectures](README.md#scalable-architecture-examples) - -## Redis with High Availability - -This section is relevant for [High Availability Architecture](README.md#high-availability-architecture-examples) -environments including [Horizontal](README.md#horizontal), -[Hybrid](README.md#hybrid), and -[Fully Distributed](README.md#fully-distributed). 
- -### Provide your own Redis instance **(CORE ONLY)** - -If you want to use your own deployed Redis instance(s), -see [Provide your own Redis instance](#provide-your-own-redis-instance-core-only) -for more details. However, you can use the GitLab Omnibus package to easily -deploy the bundled Redis. +Continue configuration of other components by going back to the +[Scaling and High Availability](README.md#gitlab-components-and-configuration-instructions) page. ### High Availability with GitLab Omnibus **(PREMIUM ONLY)** diff --git a/doc/administration/troubleshooting/img/AzureAD-basic_SAML.png b/doc/administration/troubleshooting/img/AzureAD-basic_SAML.png index be420b1a3dee55527e07db357f5fffe51374a436..a553dc182ce266f20e28fc724a8c782bfc72531a 100644 Binary files a/doc/administration/troubleshooting/img/AzureAD-basic_SAML.png and b/doc/administration/troubleshooting/img/AzureAD-basic_SAML.png differ diff --git a/doc/api/README.md b/doc/api/README.md index 639d5067dd7d9e1227cb23e7d9887190d72dd903..7a9d86ee718826790e813a7b244c73222f4698af 100644 --- a/doc/api/README.md +++ b/doc/api/README.md @@ -355,7 +355,7 @@ curl --head --header "PRIVATE-TOKEN: <your_access_token>" https://gitlab.example The response will then be: -``` +```http HTTP/1.1 200 OK Cache-Control: no-cache Content-Length: 1103 @@ -415,7 +415,7 @@ curl --request GET --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab The response header includes a link to the next page. For example: -``` +```http HTTP/1.1 200 OK ... Link: <https://gitlab.example.com/api/v4/projects?pagination=keyset&per_page=50&order_by=id&sort=asc&id_after=42>; rel="next" @@ -540,7 +540,7 @@ Such errors appear in two cases: When an attribute is missing, you will get something like: -``` +```http HTTP/1.1 400 Bad Request Content-Type: application/json { @@ -551,7 +551,7 @@ Content-Type: application/json When a validation error occurs, error messages will be different. 
They will hold all details of validation errors: -``` +```http HTTP/1.1 400 Bad Request Content-Type: application/json { @@ -589,7 +589,7 @@ follows: When you try to access an API URL that does not exist you will receive 404 Not Found. -``` +```http HTTP/1.1 404 Not Found Content-Type: application/json { diff --git a/doc/api/pipeline_triggers.md b/doc/api/pipeline_triggers.md index e207ff8e98a8db659686e16652aafbebedde47c8..55c6e37c164043ebbf01bd0716064dd45a090268 100644 --- a/doc/api/pipeline_triggers.md +++ b/doc/api/pipeline_triggers.md @@ -6,7 +6,7 @@ You can read more about [triggering pipelines through the API](../ci/triggers/RE Get a list of project's build triggers. -``` +```plaintext GET /projects/:id/triggers ``` @@ -14,7 +14,7 @@ GET /projects/:id/triggers |-----------|---------|----------|---------------------| | `id` | integer/string | yes | The ID or [URL-encoded path of the project](README.md#namespaced-path-encoding) owned by the authenticated user | -``` +```shell curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/1/triggers" ``` @@ -36,7 +36,7 @@ curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/a Get details of project's build trigger. -``` +```plaintext GET /projects/:id/triggers/:trigger_id ``` @@ -45,7 +45,7 @@ GET /projects/:id/triggers/:trigger_id | `id` | integer/string | yes | The ID or [URL-encoded path of the project](README.md#namespaced-path-encoding) owned by the authenticated user | | `trigger_id` | integer | yes | The trigger id | -``` +```shell curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/1/triggers/5" ``` @@ -65,7 +65,7 @@ curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/a Create a trigger for a project. 
-``` +```plaintext POST /projects/:id/triggers ``` @@ -74,7 +74,7 @@ POST /projects/:id/triggers | `id` | integer/string | yes | The ID or [URL-encoded path of the project](README.md#namespaced-path-encoding) owned by the authenticated user | | `description` | string | yes | The trigger name | -``` +```shell curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" --form description="my description" "https://gitlab.example.com/api/v4/projects/1/triggers" ``` @@ -94,7 +94,7 @@ curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" --form descrip Update a trigger for a project. -``` +```plaintext PUT /projects/:id/triggers/:trigger_id ``` @@ -104,7 +104,7 @@ PUT /projects/:id/triggers/:trigger_id | `trigger_id` | integer | yes | The trigger id | | `description` | string | no | The trigger name | -``` +```shell curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" --form description="my description" "https://gitlab.example.com/api/v4/projects/1/triggers/10" ``` @@ -124,7 +124,7 @@ curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" --form descript Remove a project's build trigger. -``` +```plaintext DELETE /projects/:id/triggers/:trigger_id ``` @@ -133,6 +133,6 @@ DELETE /projects/:id/triggers/:trigger_id | `id` | integer/string | yes | The ID or [URL-encoded path of the project](README.md#namespaced-path-encoding) owned by the authenticated user | | `trigger_id` | integer | yes | The trigger id | -``` +```shell curl --request DELETE --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/1/triggers/5" ``` diff --git a/doc/api/releases/links.md b/doc/api/releases/links.md index 2a9e0ccb664ac80d770d9ea6d40a03a717b7ab73..ed428b0fe75eaa0415db3e4660e0ad978d709b3e 100644 --- a/doc/api/releases/links.md +++ b/doc/api/releases/links.md @@ -9,7 +9,7 @@ GitLab supports links links to `http`, `https`, and `ftp` assets. Get assets as links from a Release. 
-``` +```plaintext GET /projects/:id/releases/:tag_name/assets/links ``` @@ -47,7 +47,7 @@ Example response: Get an asset as a link from a Release. -``` +```plaintext GET /projects/:id/releases/:tag_name/assets/links/:link_id ``` @@ -78,7 +78,7 @@ Example response: Create an asset as a link from a Release. -``` +```plaintext POST /projects/:id/releases/:tag_name/assets/links ``` @@ -114,7 +114,7 @@ Example response: Update an asset as a link from a Release. -``` +```plaintext PUT /projects/:id/releases/:tag_name/assets/links/:link_id ``` @@ -150,7 +150,7 @@ Example response: Delete an asset as a link from a Release. -``` +```plaintext DELETE /projects/:id/releases/:tag_name/assets/links/:link_id ``` diff --git a/doc/api/scim.md b/doc/api/scim.md index cdd635d0627ee7d06595472bbcafb2d63a4bdd7d..eaa56b0d0dd08a000c58a2f57f42b36f96cc0501 100644 --- a/doc/api/scim.md +++ b/doc/api/scim.md @@ -122,7 +122,7 @@ Parameters: | `userName` | string | yes | Username of the user. | | `emails` | JSON string | yes | Work email. | | `name` | JSON string | yes | Name of the user. | -| `meta` | string | no | Resource type (`User'). | +| `meta` | string | no | Resource type (`User`). | Example request: diff --git a/doc/api/search.md b/doc/api/search.md index 78f68ed20e3a7974dd43e2fed665af43c16842a5..640a98117e0be5e440cbfebbf40bf3c3354351ef 100644 --- a/doc/api/search.md +++ b/doc/api/search.md @@ -751,7 +751,7 @@ Search within the specified project. If a user is not a member of a project and the project is private, a `GET` request on that project will result to a `404` status code. 
-``` +```plaintext GET /projects/:id/search ``` diff --git a/doc/api/services.md b/doc/api/services.md index 0d74915090933644cfc17ffef6d616ca555b20e7..061dd3f4eadcf36213234a3b9a34b9f9e63cfba4 100644 --- a/doc/api/services.md +++ b/doc/api/services.md @@ -1145,7 +1145,7 @@ Parameters: | `merge_requests_events` | boolean | false | Enable notifications for merge request events | | `tag_push_events` | boolean | false | Enable notifications for tag push events | | `note_events` | boolean | false | Enable notifications for note events | -| `confidental_note_events` | boolean | false | Enable notifications for confidential note events | +| `confidential_note_events` | boolean | false | Enable notifications for confidential note events | | `pipeline_events` | boolean | false | Enable notifications for pipeline events | | `wiki_page_events` | boolean | false | Enable notifications for wiki page events | diff --git a/doc/api/settings.md b/doc/api/settings.md index 5ec05f39224468d6006460ec6696d17062244f3d..1996f1ce1440c6f92630ad1cf522fe4eb9ef8dd5 100644 --- a/doc/api/settings.md +++ b/doc/api/settings.md @@ -10,7 +10,7 @@ administrator in order to perform this action. List the current [application settings](#list-of-settings-that-can-be-accessed-via-api-calls) of the GitLab instance. -``` +```plaintext GET /application/settings ``` @@ -90,7 +90,7 @@ the `file_template_project_id`, `deletion_adjourned_period`, or the `geo_node_al Use an API call to modify GitLab instance [application settings](#list-of-settings-that-can-be-accessed-via-api-calls). -``` +```plaintext PUT /application/settings ``` diff --git a/doc/api/sidekiq_metrics.md b/doc/api/sidekiq_metrics.md index 76aa04077c77c5a08f510d9c91551cc0c2bc46bb..5350feff4e37d61e3bf05fd7e7c541982e58425c 100644 --- a/doc/api/sidekiq_metrics.md +++ b/doc/api/sidekiq_metrics.md @@ -10,7 +10,7 @@ of Sidekiq, its jobs, queues, and processes. List information about all the registered queues, their backlog and their latency. 
-``` +```plaintext GET /sidekiq/queue_metrics ``` @@ -35,7 +35,7 @@ Example response: List information about all the Sidekiq workers registered to process your queues. -``` +```plaintext GET /sidekiq/process_metrics ``` @@ -77,7 +77,7 @@ Example response: List information about the jobs that Sidekiq has performed. -``` +```plaintext GET /sidekiq/job_stats ``` @@ -102,7 +102,7 @@ Example response: List all the currently available information about Sidekiq. -``` +```plaintext GET /sidekiq/compound_metrics ``` diff --git a/doc/api/statistics.md b/doc/api/statistics.md index c7713ab2dae9bd1ae267d2540e5e4f415cb3fd12..883a7640cf8a4432c3d1dafe432abf161fe11f95 100644 --- a/doc/api/statistics.md +++ b/doc/api/statistics.md @@ -8,7 +8,7 @@ administrator in order to perform this action. NOTE: **Note:** These statistics are approximate. -``` +```plaintext GET /application/statistics ``` diff --git a/doc/api/suggestions.md b/doc/api/suggestions.md index f95ab82848ad2481bf46fe5d40b5f9846a6805b1..84bafd3c1ead604ed8629ce9eae9f719e92eb6ad 100644 --- a/doc/api/suggestions.md +++ b/doc/api/suggestions.md @@ -7,7 +7,7 @@ Every API call to suggestions must be authenticated. Applies a suggested patch in a merge request. Users must be at least [Developer](../user/permissions.md) to perform such action. -``` +```plaintext PUT /suggestions/:id/apply ``` diff --git a/doc/api/system_hooks.md b/doc/api/system_hooks.md index 1e34adc5320614430e50fa9241cbb3a341b33bc3..cd69a6a6b34c25a45d16d4948b31bda2228c0a04 100644 --- a/doc/api/system_hooks.md +++ b/doc/api/system_hooks.md @@ -11,7 +11,7 @@ Read more about [system hooks](../system_hooks/system_hooks.md). Get a list of all system hooks. -``` +```plaintext GET /hooks ``` @@ -42,7 +42,7 @@ Example response: Add a new system hook. -``` +```plaintext POST /hooks ``` @@ -81,7 +81,7 @@ Example response: ## Test system hook -``` +```plaintext GET /hooks/:id ``` @@ -112,7 +112,7 @@ Example response: Deletes a system hook. 
-``` +```plaintext DELETE /hooks/:id ``` diff --git a/doc/api/tags.md b/doc/api/tags.md index a796b7583280143d08819d3739b9bb15782524fa..0a0490e072e2e9a2ac9302faf830693c00c18653 100644 --- a/doc/api/tags.md +++ b/doc/api/tags.md @@ -6,7 +6,7 @@ Get a list of repository tags from a project, sorted by name in reverse alphabetical order. This endpoint can be accessed without authentication if the repository is publicly accessible. -``` +```plaintext GET /projects/:id/repository/tags ``` @@ -57,7 +57,7 @@ Parameters: Get a specific repository tag determined by its name. This endpoint can be accessed without authentication if the repository is publicly accessible. -``` +```plaintext GET /projects/:id/repository/tags/:tag_name ``` @@ -104,7 +104,7 @@ Example Response: Creates a new tag in the repository that points to the supplied ref. -``` +```plaintext POST /projects/:id/repository/tags ``` @@ -164,7 +164,7 @@ status code `405` with an explaining error message is returned. Deletes a tag of a repository with given name. -``` +```plaintext DELETE /projects/:id/repository/tags/:tag_name ``` @@ -178,7 +178,7 @@ Parameters: Add release notes to the existing Git tag. If there already exists a release for the given tag, status code `409` is returned. -``` +```plaintext POST /projects/:id/repository/tags/:tag_name/release ``` @@ -210,7 +210,7 @@ Response: Updates the release notes of a given release. -``` +```plaintext PUT /projects/:id/repository/tags/:tag_name/release ``` diff --git a/doc/api/templates/dockerfiles.md b/doc/api/templates/dockerfiles.md index 4453d3692c792b8dacb8ce313e58c75ce3aafab8..6e693a405b6e5ac38b9c55cfb0104b5683f794b2 100644 --- a/doc/api/templates/dockerfiles.md +++ b/doc/api/templates/dockerfiles.md @@ -12,7 +12,7 @@ information on Dockerfiles, see the Get all Dockerfile templates. -``` +```plaintext GET /templates/dockerfiles ``` @@ -99,7 +99,7 @@ Example response: Get a single Dockerfile template. 
-``` +```plaintext GET /templates/dockerfiles/:key ``` diff --git a/doc/api/templates/licenses.md b/doc/api/templates/licenses.md index 0b95e4d806508957abb4114c6ed065b4dd6cbd3a..f66fb70e10815bfa9124651296dcd9dab544547d 100644 --- a/doc/api/templates/licenses.md +++ b/doc/api/templates/licenses.md @@ -13,7 +13,7 @@ resources available online. Get all license templates. -``` +```plaintext GET /templates/licenses ``` @@ -110,7 +110,7 @@ Example response: Get a single license template. You can pass parameters to replace the license placeholder. -``` +```plaintext GET /templates/licenses/:key ``` diff --git a/doc/api/todos.md b/doc/api/todos.md index a83b045f9a46cc6b5dfc6fc4b6e82f6668d66148..058009b0e3bfc5ded95a2a0c0e566e0f754398c1 100644 --- a/doc/api/todos.md +++ b/doc/api/todos.md @@ -7,7 +7,7 @@ Returns a list of todos. When no filter is applied, it returns all pending todos for the current user. Different filters allow the user to precise the request. -``` +```plaintext GET /todos ``` @@ -184,7 +184,7 @@ Example Response: Marks a single pending todo given by its ID for the current user as done. The todo marked as done is returned in the response. -``` +```plaintext POST /todos/:id/mark_as_done ``` @@ -280,7 +280,7 @@ Example Response: Marks all pending todos for the current user as done. It returns the HTTP status code `204` with an empty response. -``` +```plaintext POST /todos/mark_as_done ``` diff --git a/doc/api/users.md b/doc/api/users.md index 7952a703e470be3826c6efbb7f7ec4bb77ed43af..929ad1248be1121c23a00ce7b389ccac4eea01ea 100644 --- a/doc/api/users.md +++ b/doc/api/users.md @@ -10,7 +10,7 @@ This function takes pagination parameters `page` and `per_page` to restrict the ### For normal users -``` +```plaintext GET /users ``` @@ -39,13 +39,13 @@ You can also search for users by name or primary email using `?search=`. 
For exa In addition, you can lookup users by username: -``` +```plaintext GET /users?username=:username ``` For example: -``` +```plaintext GET /users?username=jack_smith ``` @@ -53,11 +53,11 @@ In addition, you can filter users based on states eg. `blocked`, `active` This works only to filter users who are `blocked` or `active`. It does not support `active=false` or `blocked=false`. -``` +```plaintext GET /users?active=true ``` -``` +```plaintext GET /users?blocked=true ``` @@ -66,7 +66,7 @@ Username search is case insensitive. ### For admins -``` +```plaintext GET /users ``` @@ -187,13 +187,13 @@ the `group_saml` provider option: You can lookup users by external UID and provider: -``` +```plaintext GET /users?extern_uid=:extern_uid&provider=:provider ``` For example: -``` +```plaintext GET /users?extern_uid=1234567&provider=github ``` @@ -201,19 +201,19 @@ You can search for users who are external with: `/users?external=true` You can search users by creation date time range with: -``` +```plaintext GET /users?created_before=2001-01-02T00:00:00.060Z&created_after=1999-01-02T00:00:00.060 ``` You can filter by [custom attributes](custom_attributes.md) with: -``` +```plaintext GET /users?custom_attributes[key]=value&custom_attributes[other_key]=other_value ``` You can include the users' [custom attributes](custom_attributes.md) in the response with: -``` +```plaintext GET /users?with_custom_attributes=true ``` @@ -223,7 +223,7 @@ Get a single user. ### For user -``` +```plaintext GET /users/:id ``` @@ -253,7 +253,7 @@ Parameters: ### For admin -``` +```plaintext GET /users/:id ``` @@ -340,7 +340,7 @@ see the `group_saml` option: You can include the user's [custom attributes](custom_attributes.md) in the response with: -``` +```plaintext GET /users/:id?with_custom_attributes=true ``` @@ -358,7 +358,7 @@ over `password`. 
In addition, `reset_password` and NOTE: **Note:** From [GitLab 12.1](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/29888/), `private_profile` will default to `false`. -``` +```plaintext POST /users ``` @@ -399,7 +399,7 @@ Parameters: Modifies an existing user. Only administrators can change attributes of a user. -``` +```plaintext PUT /users/:id ``` @@ -445,7 +445,7 @@ For example, when renaming the email address to some existing one. Deletes a user's authentication identity using the provider name associated with that identity. Available only for administrators. -``` +```plaintext DELETE /users/:id/identities/:provider ``` @@ -459,7 +459,7 @@ Parameters: Deletes a user. Available only for administrators. This returns a `204 No Content` status code if the operation was successfully, `404` if the resource was not found or `409` if the user cannot be soft deleted. -``` +```plaintext DELETE /users/:id ``` @@ -474,7 +474,7 @@ Parameters: Gets currently authenticated user. -``` +```plaintext GET /user ``` @@ -522,7 +522,7 @@ Parameters: - `sudo` (optional) - the ID of a user to make the call in their place -``` +```plaintext GET /user ``` @@ -571,7 +571,7 @@ GET /user Get the status of the currently signed in user. -``` +```plaintext GET /user/status ``` @@ -593,7 +593,7 @@ Example response: Get the status of a user. -``` +```plaintext GET /users/:id_or_username/status ``` @@ -619,7 +619,7 @@ Example response: Set the status of the current user. -``` +```plaintext PUT /user/status ``` @@ -652,7 +652,7 @@ Get the counts (same as in top right menu) of the currently signed in user. | --------- | ---- | ----------- | | `merge_requests` | number | Merge requests that are active and assigned to current user. | -``` +```plaintext GET /user_counts ``` @@ -676,7 +676,7 @@ Please refer to the [List of user projects](projects.md#list-user-projects). Get a list of currently authenticated user's SSH keys. 
-``` +```plaintext GET /user/keys ``` @@ -705,7 +705,7 @@ Parameters: Get a list of a specified user's SSH keys. -``` +```plaintext GET /users/:id_or_username/keys ``` @@ -717,7 +717,7 @@ GET /users/:id_or_username/keys Get a single key. -``` +```plaintext GET /user/keys/:key_id ``` @@ -738,7 +738,7 @@ Parameters: Creates a new key owned by the currently authenticated user. -``` +```plaintext POST /user/keys ``` @@ -776,7 +776,7 @@ error occurs a `400 Bad Request` is returned with a message explaining the error Create new key owned by specified user. Available only for admin -``` +```plaintext POST /users/:id/keys ``` @@ -791,7 +791,7 @@ Parameters: Deletes key owned by currently authenticated user. This returns a `204 No Content` status code if the operation was successfully or `404` if the resource was not found. -``` +```plaintext DELETE /user/keys/:key_id ``` @@ -803,7 +803,7 @@ Parameters: Deletes key owned by a specified user. Available only for admin. -``` +```plaintext DELETE /users/:id/keys/:key_id ``` @@ -816,7 +816,7 @@ Parameters: Get a list of currently authenticated user's GPG keys. -``` +```plaintext GET /user/gpg_keys ``` @@ -840,7 +840,7 @@ Example response: Get a specific GPG key of currently authenticated user. -``` +```plaintext GET /user/gpg_keys/:key_id ``` @@ -868,7 +868,7 @@ Example response: Creates a new GPG key owned by the currently authenticated user. -``` +```plaintext POST /user/gpg_keys ``` @@ -898,7 +898,7 @@ Example response: Delete a GPG key owned by currently authenticated user. -``` +```plaintext DELETE /user/gpg_keys/:key_id ``` @@ -918,7 +918,7 @@ Returns `204 No Content` on success, or `404 Not found` if the key cannot be fou Get a list of a specified user's GPG keys. Available only for admins. -``` +```plaintext GET /users/:id/gpg_keys ``` @@ -948,7 +948,7 @@ Example response: Get a specific GPG key for a given user. Available only for admins. 
-``` +```plaintext GET /users/:id/gpg_keys/:key_id ``` @@ -977,7 +977,7 @@ Example response: Create new GPG key owned by the specified user. Available only for admins. -``` +```plaintext POST /users/:id/gpg_keys ``` @@ -1008,7 +1008,7 @@ Example response: Delete a GPG key owned by a specified user. Available only for admins. -``` +```plaintext DELETE /users/:id/gpg_keys/:key_id ``` @@ -1027,7 +1027,7 @@ curl --request DELETE --header "PRIVATE-TOKEN: <your_access_token>" https://gitl Get a list of currently authenticated user's emails. -``` +```plaintext GET /user/emails ``` @@ -1052,7 +1052,7 @@ Parameters: Get a list of a specified user's emails. Available only for admin -``` +```plaintext GET /users/:id/emails ``` @@ -1064,7 +1064,7 @@ Parameters: Get a single email. -``` +```plaintext GET /user/emails/:email_id ``` @@ -1083,7 +1083,7 @@ Parameters: Creates a new email owned by the currently authenticated user. -``` +```plaintext POST /user/emails ``` @@ -1115,7 +1115,7 @@ error occurs a `400 Bad Request` is returned with a message explaining the error Create new email owned by specified user. Available only for admin -``` +```plaintext POST /users/:id/emails ``` @@ -1130,7 +1130,7 @@ Parameters: Deletes email owned by currently authenticated user. This returns a `204 No Content` status code if the operation was successful or `404` if the resource was not found. -``` +```plaintext DELETE /user/emails/:email_id ``` @@ -1142,7 +1142,7 @@ Parameters: Deletes email owned by a specified user. Available only for admin. -``` +```plaintext DELETE /users/:id/emails/:email_id ``` @@ -1155,7 +1155,7 @@ Parameters: Blocks the specified user. Available only for admin. -``` +```plaintext POST /users/:id/block ``` @@ -1170,7 +1170,7 @@ Will return `201 OK` on success, `404 User Not Found` if the user cannot be found or Unblocks the specified user. Available only for admin. 
-``` +```plaintext POST /users/:id/unblock ``` @@ -1187,7 +1187,7 @@ Will return `201 OK` on success, `404 User Not Found` if the user cannot be found or Deactivates the specified user. Available only for admin. -``` +```plaintext POST /users/:id/deactivate ``` @@ -1209,7 +1209,7 @@ Returns: Activates the specified user. Available only for admin. -``` +```plaintext POST /users/:id/activate ``` @@ -1234,7 +1234,7 @@ Please refer to the [Events API documentation](events.md#get-user-contribution-e It retrieves every impersonation token of the user. Use the pagination parameters `page` and `per_page` to restrict the list of impersonation tokens. -``` +```plaintext GET /users/:user_id/impersonation_tokens ``` @@ -1245,7 +1245,7 @@ Parameters: | `user_id` | integer | yes | The ID of the user | | `state` | string | no | filter tokens based on state (`all`, `active`, `inactive`) | -``` +```shell curl --header "PRIVATE-TOKEN: <your_access_token>" https://gitlab.example.com/api/v4/users/42/impersonation_tokens ``` @@ -1286,7 +1286,7 @@ Example response: It shows a user's impersonation token. -``` +```plaintext GET /users/:user_id/impersonation_tokens/:impersonation_token_id ``` @@ -1297,7 +1297,7 @@ Parameters: | `user_id` | integer | yes | The ID of the user | | `impersonation_token_id` | integer | yes | The ID of the impersonation token | -``` +```shell curl --header "PRIVATE-TOKEN: <your_access_token>" https://gitlab.example.com/api/v4/users/42/impersonation_tokens/2 ``` @@ -1328,7 +1328,7 @@ You are only able to create impersonation tokens to impersonate the user and per both API calls and Git reads and writes. The user will not see these tokens in their profile settings page. 
-``` +```plaintext POST /users/:user_id/impersonation_tokens ``` @@ -1339,7 +1339,7 @@ POST /users/:user_id/impersonation_tokens | `expires_at` | date | no | The expiration date of the impersonation token in ISO format (`YYYY-MM-DD`)| | `scopes` | array | yes | The array of scopes of the impersonation token (`api`, `read_user`) | -``` +```shell curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" --data "name=mytoken" --data "expires_at=2017-04-04" --data "scopes[]=api" https://gitlab.example.com/api/v4/users/42/impersonation_tokens ``` @@ -1367,11 +1367,11 @@ Example response: It revokes an impersonation token. -``` +```plaintext DELETE /users/:user_id/impersonation_tokens/:impersonation_token_id ``` -``` +```shell curl --request DELETE --header "PRIVATE-TOKEN: <your_access_token>" https://gitlab.example.com/api/v4/users/42/impersonation_tokens/1 ``` @@ -1398,7 +1398,7 @@ The activities that update the timestamp are: By default, it shows the activity for all users in the last 6 months, but this can be amended by using the `from` parameter. -``` +```plaintext GET /user/activities ``` diff --git a/doc/api/version.md b/doc/api/version.md index a89b88782987a67fb6cbf25ddae98794c72f7785..6c9ff6ac9e1040560542077ba4ee0c6311f16006 100644 --- a/doc/api/version.md +++ b/doc/api/version.md @@ -5,7 +5,7 @@ Retrieve version information for this GitLab instance. Responds `200 OK` for authenticated users. -``` +```plaintext GET /version ``` diff --git a/doc/api/visual_review_discussions.md b/doc/api/visual_review_discussions.md index 3d1c5e5c4c8fde5246a8d769cd7a1511cfc3fa44..161f84f46186599c08d433d4f2338ed615aaab4b 100644 --- a/doc/api/visual_review_discussions.md +++ b/doc/api/visual_review_discussions.md @@ -10,7 +10,7 @@ feedback from [Visual Reviews](../ci/review_apps/index.md#visual-reviews-starter Creates a new thread to a single project merge request. This is similar to creating a note but other comments (replies) can be added to it later. 
-``` +```plaintext POST /projects/:id/merge_requests/:merge_request_iid/visual_review_discussions ``` diff --git a/doc/api/vulnerability_findings.md b/doc/api/vulnerability_findings.md index 833a46ccce57e968edf786c04c2879e67b6ef982..d1d4966f0f08f00fb78c3b8fd92484510be291b3 100644 --- a/doc/api/vulnerability_findings.md +++ b/doc/api/vulnerability_findings.md @@ -34,7 +34,7 @@ Read more on [pagination](README.md#pagination). List all of a project's vulnerability findings. -``` +```plaintext GET /projects/:id/vulnerability_findings GET /projects/:id/vulnerability_findings?report_type=sast GET /projects/:id/vulnerability_findings?report_type=container_scanning diff --git a/doc/api/wikis.md b/doc/api/wikis.md index 6cde2ebb7a7b5b7de932d6586a36a91e3f921f51..cdaf95fc291a5c6dbe4ed363530467ac962dcdc0 100644 --- a/doc/api/wikis.md +++ b/doc/api/wikis.md @@ -8,7 +8,7 @@ Available only in APIv4. Get all wiki pages for a given project. -``` +```plaintext GET /projects/:id/wikis ``` @@ -49,7 +49,7 @@ Example response: Get a wiki page for a given project. -``` +```plaintext GET /projects/:id/wikis/:slug ``` @@ -77,7 +77,7 @@ Example response: Creates a new wiki page for the given repository with the given title, slug, and content. -``` +```plaintext POST /projects/:id/wikis ``` @@ -107,7 +107,7 @@ Example response: Updates an existing wiki page. At least one parameter is required to update the wiki page. -``` +```plaintext PUT /projects/:id/wikis/:slug ``` @@ -138,7 +138,7 @@ Example response: Deletes a wiki page with a given slug. -``` +```plaintext DELETE /projects/:id/wikis/:slug ``` @@ -160,7 +160,7 @@ On success the HTTP status code is `204` and no JSON response is expected. Uploads a file to the attachment folder inside the wiki's repository. The attachment folder is the `uploads` folder. 
-``` +```plaintext POST /projects/:id/wikis/attachments ``` diff --git a/doc/ci/caching/index.md b/doc/ci/caching/index.md index 0109d87921b394161a0f2a8e9a33609a2c4744ab..a60310076a87382a62fd99d225fd0afd7ecbe783 100644 --- a/doc/ci/caching/index.md +++ b/doc/ci/caching/index.md @@ -206,10 +206,11 @@ templates](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/lib/gitlab/ci/t ### Caching Node.js dependencies -Assuming your project is using [npm](https://www.npmjs.com/) or -[Yarn](https://classic.yarnpkg.com/en/) to install the Node.js dependencies, the -following example defines `cache` globally so that all jobs inherit it. -Node.js modules are installed in `node_modules/` and are cached per-branch: +Assuming your project is using [npm](https://www.npmjs.com/) to install the Node.js +dependencies, the following example defines `cache` globally so that all jobs inherit it. +By default, npm stores cache data in the home folder `~/.npm` but since +[you can't cache things outside of the project directory](../yaml/README.md#cachepaths), +we tell npm to use `./.npm` instead, and it is cached per-branch: ```yaml # @@ -221,10 +222,10 @@ image: node:latest cache: key: ${CI_COMMIT_REF_SLUG} paths: - - node_modules/ + - .npm/ before_script: - - npm install + - npm ci --cache .npm --prefer-offline test_async: script: diff --git a/doc/ci/variables/README.md b/doc/ci/variables/README.md index 643ccd45898240b1001957a4cffede594bb49b7c..c768c833e7cd4c6a297dfc0cdd31fefa229c2be7 100644 --- a/doc/ci/variables/README.md +++ b/doc/ci/variables/README.md @@ -571,9 +571,12 @@ Below you can find supported syntax reference: - `$VARIABLE =~ /^content.*/` - `$VARIABLE_1 !~ /^content.*/` (introduced in GitLab 11.11) - It is possible perform pattern matching against a variable and regular - expression. Expression like this evaluates to truth if matches are found - when using `=~`. It evaluates to truth if matches are not found when `!~` is used. 
+ Variable pattern matching with regular expressions uses the + [RE2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). + Expressions evaluate as `true` if: + + - Matches are found when using `=~`. + - Matches are *not* found when using `!~`. Pattern matching is case-sensitive by default. Use `i` flag modifier, like `/pattern/i` to make a pattern case-insensitive. diff --git a/doc/ci/yaml/README.md b/doc/ci/yaml/README.md index 8931ee43a8a451f82e377485780c771a1738f57c..255ae3f7c13890c8ebb31adebc17d7bf41777c37 100644 --- a/doc/ci/yaml/README.md +++ b/doc/ci/yaml/README.md @@ -857,7 +857,10 @@ In this example, if the first rule: `rules:if` differs slightly from `only:variables` by accepting only a single expression string, rather than an array of them. Any set of expressions to be -evaluated should be conjoined into a single expression using `&&` or `||`. For example: +evaluated should be conjoined into a single expression using `&&` or `||`, and use +the [variable matching syntax](../variables/README.md#supported-syntax). + +For example: ```yaml job: diff --git a/doc/development/README.md b/doc/development/README.md index 6f197ed4099f73289d5f8829a88e5e47a19c43eb..01dff0c20173c21ef8588b37f4d81edf5c629fb8 100644 --- a/doc/development/README.md +++ b/doc/development/README.md @@ -9,8 +9,20 @@ description: 'Learn how to contribute to GitLab.' - Set up GitLab's development environment with [GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/doc/howto/README.md) - [GitLab contributing guide](contributing/index.md) - - [Issues workflow](contributing/issue_workflow.md) (issue tracker guidelines, triaging, labels, feature proposals, issue weight, regression issues, technical and UX debt) - - [Merge requests workflow](contributing/merge_request_workflow.md) (merge request guidelines, contribution acceptance criteria, definition of done, dependencies) + - [Issues workflow](contributing/issue_workflow.md). 
For information on: + - Issue tracker guidelines. + - Triaging. + - Labels. + - Feature proposals. + - Issue weight. + - Regression issues. + - Technical or UX debt. + - [Merge requests workflow](contributing/merge_request_workflow.md). For + information on: + - Merge request guidelines. + - Contribution acceptance criteria. + - Definition of done. + - Dependencies. - [Style guides](contributing/style_guides.md) - [Implement design & UI elements](contributing/design.md) - [GitLab Architecture Overview](architecture.md) diff --git a/doc/development/architecture.md b/doc/development/architecture.md index c5ac8c040f8799455e7a81adfb6d37ce8f7fbdd8..5a1b53bc2fb4118e51258b5cd56283587c47895e 100644 --- a/doc/development/architecture.md +++ b/doc/development/architecture.md @@ -288,7 +288,7 @@ GitLab CI is the open-source continuous integration service included with GitLab - Configuration: [Omnibus][grafana-omnibus], [Charts][grafana-charts] - Layer: Monitoring -Grafana is an open source, feature rich metrics dashboard and graph editor for Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB. +Grafana is an open source, feature rich metrics dashboard and graph editor for Graphite, Elasticsearch, OpenTSDB, Prometheus, and InfluxDB. #### Jaeger @@ -321,7 +321,7 @@ Mattermost is an open source, private cloud, Slack-alternative from <https://mat - Configuration: [Omnibus][minio-omnibus], [Charts][minio-charts], [GDK][minio-gdk] - Layer: Core Service (Data) -MinIO is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB. +MinIO is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. 
It is best suited for storing unstructured data such as photos, videos, log files, backups, and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB. #### NGINX diff --git a/doc/development/code_review.md b/doc/development/code_review.md index d5394e30d6a01864cb13d274c090f7fe5ce7d7db..7b00e3cea0deb819144ac8f0081ca0ad501be303 100644 --- a/doc/development/code_review.md +++ b/doc/development/code_review.md @@ -17,7 +17,7 @@ uncovered edge cases. The reviewer can be from a different team, but it is recommended to pick someone who knows the domain well. You can read more about the importance of involving reviewer(s) in the section on the responsibility of the author below. -If you need some guidance (e.g. it's your first merge request), feel free to ask +If you need some guidance (for example, it's your first merge request), feel free to ask one of the [Merge request coaches](https://about.gitlab.com/company/team/). If you need assistance with security scans or comments, feel free to include the @@ -148,7 +148,7 @@ architecture, code organization, separation of concerns, tests, DRYness, consistency, and readability. Since a maintainer's job only depends on their knowledge of the overall GitLab -codebase, and not that of any specific domain, they can review, approve and merge +codebase, and not that of any specific domain, they can review, approve, and merge merge requests from any team and in any product area. In fact, authors are encouraged to get their merge requests merged by maintainers @@ -334,7 +334,7 @@ reviewee. reviewer before doing it, but have the courage to do it when you believe it is important. 
- In the interest of [Iteration](https://about.gitlab.com/handbook/values/#iteration), - if, as a reviewer, your suggestions are non-blocking changes or personal preference + if your review suggestions are non-blocking changes, or personal preference (not a documented or agreed requirement), consider approving the merge request before passing it back to the author. This allows them to implement your suggestions if they agree, or allows them to pass it onto the diff --git a/doc/development/contributing/design.md b/doc/development/contributing/design.md index 8426db84aa4910e6427071e54a4c367750c53794..352392931c0e977deb1d6347c157ace6213b5f7e 100644 --- a/doc/development/contributing/design.md +++ b/doc/development/contributing/design.md @@ -9,7 +9,11 @@ To better understand the priority by which UX tackles issues, see the [UX sectio Once an issue has been worked on and is ready for development, a UXer removes the ~"UX" label and applies the ~"UX ready" label to that issue. -There is a special type label called ~"product discovery". It represents a discovery issue intended for UX, PM, FE, and BE to discuss the problem and potential solutions. The final output for this issue could be a doc of requirements, a design artifact, or even a prototype. The solution will be developed in a subsequent milestone. +There is a special type label called ~"product discovery" intended for UX, +PM, FE, and BE. It represents a discovery issue to discuss the problem and +potential solutions. The final output for this issue could be a doc of +requirements, a design artifact, or even a prototype. The solution will be +developed in a subsequent milestone. ~"product discovery" issues are like any other issue and should contain a milestone label, ~"Deliverable" or ~"Stretch", when scheduled in the current milestone. @@ -17,7 +21,7 @@ The initial issue should be about the problem we are solving. 
If a separate [pro is needed for additional research and design work, it will be created by a PM or UX person. Assign the ~UX, ~"product discovery" and ~"Deliverable" labels, add a milestone and use a title that makes it clear that the scheduled issue is product discovery -(e.g. `Product discovery for XYZ`). +(for example, `Product discovery for XYZ`). In order to complete a product discovery issue in a release, you must complete the following: diff --git a/doc/development/database_review.md b/doc/development/database_review.md index 113314884d5d19f2b56dec0e92f6f909502ad9ee..77e5060720baaabe732595d20031677f5f97a46c 100644 --- a/doc/development/database_review.md +++ b/doc/development/database_review.md @@ -12,7 +12,7 @@ A database review is required for: including files in: - `db/` - `lib/gitlab/background_migration/` -- Changes to the database tooling, e.g.: +- Changes to the database tooling. For example: - migration or ActiveRecord helpers in `lib/gitlab/database/` - load balancing - Changes that produce SQL queries that are beyond the obvious. It is @@ -50,7 +50,7 @@ A database **reviewer**'s role is to: Currently we have a [critical shortage of database maintainers](https://gitlab.com/gitlab-org/gitlab/issues/29717). Until we are able to increase the number of database maintainers to support the volume of reviews, we have implemented this temporary solution. If the database **reviewer** cannot find an available database **maintainer** then: 1. Assign the MR for a second review by a **database trainee maintainer** for further review. -1. Once satisfied with the review process, and if the database **maintainer** is still not available, skip the database maintainer approval step and assign the merge request to a backend maintainer for final review and approval. +1. 
Once satisfied with the review process and if the database **maintainer** is still not available, skip the database maintainer approval step and assign the merge request to a backend maintainer for final review and approval. A database **maintainer**'s role is to: @@ -119,10 +119,10 @@ the following preparations into account. - Add foreign keys to any columns pointing to data in other tables, including [an index](migration_style_guide.md#adding-foreign-key-constraints). - Add indexes for fields that are used in statements such as `WHERE`, `ORDER BY`, `GROUP BY`, and `JOIN`s. -#### Preparation when removing columns, tables, indexes or other structures +#### Preparation when removing columns, tables, indexes, or other structures - Follow the [guidelines on dropping columns](what_requires_downtime.md#dropping-columns). -- Generally it's best practice, but not a hard rule, to remove indexes and foreign keys in a post-deployment migration. +- Generally it's best practice (but not a hard rule) to remove indexes and foreign keys in a post-deployment migration. - Exceptions include removing indexes and foreign keys for small tables. ### How to review for database @@ -156,14 +156,14 @@ the following preparations into account. - Check migrations are reversible and implement a `#down` method - Check data migrations: - Establish a time estimate for execution on GitLab.com. - - Depending on timing, data migrations can be placed on regular, post-deploy or background migrations. + - Depending on timing, data migrations can be placed on regular, post-deploy, or background migrations. - Data migrations should be reversible too or come with a description of how to reverse, when possible. This applies to all types of migrations (regular, post-deploy, background). - Query performance - Check for any obviously complex queries and queries the author specifically points out for review (if any) - If not present yet, ask the author to provide SQL queries and query plans - (e.g. 
by using [chatops](understanding_explain_plans.md#chatops) or direct + (for example, by using [chatops](understanding_explain_plans.md#chatops) or direct database access) - For given queries, review parameters regarding data distribution - [Check query plans](understanding_explain_plans.md) and suggest improvements diff --git a/doc/development/documentation/index.md b/doc/development/documentation/index.md index 684951785118f2488676e74f02eca90a6b3466c6..4fcdd8a1fb0a6e106e2915aa1321f6e2ec12b7a2 100644 --- a/doc/development/documentation/index.md +++ b/doc/development/documentation/index.md @@ -16,7 +16,7 @@ In addition to this page, the following resources can help you craft and contrib ## Source files and rendered web locations -Documentation for GitLab, GitLab Runner, Omnibus GitLab and Charts is published to <https://docs.gitlab.com>. Documentation for GitLab is also published within the application at `/help` on the domain of the GitLab instance. +Documentation for GitLab, GitLab Runner, Omnibus GitLab, and Charts is published to <https://docs.gitlab.com>. Documentation for GitLab is also published within the application at `/help` on the domain of the GitLab instance. At `/help`, only help for your current edition and version is included. Help for other versions is available at <https://docs.gitlab.com/archives/>. 
The source of the documentation exists within the codebase of each GitLab application in the following repository locations: diff --git a/doc/development/documentation/site_architecture/index.md b/doc/development/documentation/site_architecture/index.md index 232bca30e0ff7cb5b0d3422104ab807d49d5fb30..c91a9882bb0a62ec3ed923075a02b7f5b1f1fbce 100644 --- a/doc/development/documentation/site_architecture/index.md +++ b/doc/development/documentation/site_architecture/index.md @@ -107,7 +107,7 @@ The pipeline in the `gitlab-docs` project: ### Rebuild the docs site Docker images -Once a week, on Mondays, a scheduled pipeline runs and rebuilds the Docker images +Once a week on Mondays, a scheduled pipeline runs and rebuilds the Docker images used in various pipeline jobs, like `docs-lint`. The Docker image configuration files are located at <https://gitlab.com/gitlab-org/gitlab-docs/-/tree/master/dockerfiles>. @@ -230,7 +230,7 @@ for its search function. This is how it works: NOTE: **For GitLab employees:** The credentials to access the Algolia dashboard are stored in 1Password. If you want to receive weekly reports of the search usage, search the Google doc with -title "Email, Slack, and GitLab Groups and Aliases", search for `docsearch`, +title `Email, Slack, and GitLab Groups and Aliases`, search for `docsearch`, and add a comment with your email to be added to the alias that gets the weekly reports. diff --git a/doc/development/documentation/styleguide.md b/doc/development/documentation/styleguide.md index b456887bd0855702a13b7bf76b2dcd323005d48b..f769560d67f1787be3671d03124f4a817814e49c 100644 --- a/doc/development/documentation/styleguide.md +++ b/doc/development/documentation/styleguide.md @@ -17,14 +17,12 @@ that apply to all GitLab content, not just documentation. ### Why a single source of truth -The documentation is the SSOT for all information related to the implementation, usage, and troubleshooting of GitLab products and features. 
It evolves continually, in keeping with new products and features, and with improvements for clarity, accuracy, and completeness. +The documentation of GitLab products and features is the SSOT for all information related to implementation, usage, and troubleshooting. It evolves continually, in keeping with new products and features, and with improvements for clarity, accuracy, and completeness. This policy prevents information silos, ensuring that it remains easy to find information about GitLab products. It also informs decisions about the kinds of content we include in our documentation. -The documentation is a continually evolving SSOT for all information related to the implementation, usage, and troubleshooting of GitLab products and features. - ### All information Include problem-solving actions that may address rare cases or be considered 'risky', so long as proper context is provided in the form of fully detailed warnings and caveats. This kind of content should be included as it could be helpful to others and, when properly explained, its benefits outweigh the risks. If you think you have found an exception to this rule, contact the Technical Writing team. @@ -34,7 +32,7 @@ For the Troubleshooting sections, people in GitLab Support can merge additions t ### All media types -Include any media types/sources if the content is relevant to readers. You can freely include or link presentations, diagrams, videos, etc.; no matter who it was originally composed for, if it is helpful to any of our audiences, we can include it. +Include any media types/sources if the content is relevant to readers. You can freely include or link presentations, diagrams, videos, and so on; no matter who it was originally composed for, if it is helpful to any of our audiences, we can include it. - If you use an image that has a separate source file (for example, a vector or diagram format), link the image to the source file so that it may be reused or updated by anyone. 
- Do not copy and paste content from other sources unless it is a limited quotation with the source cited. Typically it is better to either rephrase relevant information in your own words or link out to the other source. @@ -63,13 +61,17 @@ Instead, link to the SSOT and explain why it is important to consume the informa ### Organize by topic, not by type -Beyond top-level audience-type folders (e.g. `administration`), we organize content by topic, not by type, so that it can be located as easily as possible within the single-source-of-truth (SSOT) section for the subject matter. +Beyond top-level audience-type folders (for example, `administration`), we organize content by topic, not by type, so that it can be located as easily as possible within the single-source-of-truth (SSOT) section for the subject matter. + +For example, do not create groupings of similar media types, such as: -For example, do not create groupings of similar media types (e.g. glossaries, FAQs, or sets of all articles or videos). +- Glossaries. +- FAQs. +- Sets of all articles or videos. Such grouping of content by type makes it difficult to browse for the information you need and difficult to maintain up-to-date content. -Instead, organize content by its subject (e.g. everything related to CI goes together) +Instead, organize content by its subject (for example, everything related to CI goes together) and cross-link between any related content. ### Docs-first methodology @@ -79,7 +81,10 @@ We employ a **docs-first methodology** to help ensure that the docs remain a com - If the answer to a question exists in documentation, share the link to the docs instead of rephrasing the information. - When you encounter new information not available in GitLab’s documentation (for example, when working on a support case or testing a feature), your first step should be to create a merge request (MR) to add this information to the docs. You can then share the MR in order to communicate this information. 
-New information that would be useful toward the future usage or troubleshooting of GitLab should not be written directly in a forum or other messaging system, but added to a docs MR and then referenced, as described above. Note that among any other doc changes, you can always add a Troubleshooting section to a doc if none exists, or un-comment and use the placeholder Troubleshooting section included as part of our [doc template](structure.md#template-for-new-docs), if present. +New information that would be useful toward the future usage or troubleshooting of GitLab should not be written directly in a forum or other messaging system, but added to a docs MR and then referenced, as described above. Note that among any other doc changes, you can either: + +- Add a Troubleshooting section to a doc if none exists. +- Un-comment and use the placeholder Troubleshooting section included as part of our [doc template](structure.md#template-for-new-docs), if present. The more we reflexively add useful information to the docs, the more (and more successfully) the docs will be used to efficiently accomplish tasks and solve problems. @@ -98,7 +103,7 @@ Ruby gem will support all [GFM markup](../../user/markdown.md) in the future. Th all markup that is supported for display in the GitLab application itself. For now, use regular Markdown markup, following the rules in the linked style guide. -Note that Kramdown-specific markup (e.g., `{:.class}`) will not render properly on GitLab instances under [`/help`](index.md#gitlab-help). +Note that Kramdown-specific markup (for example, `{:.class}`) will not render properly on GitLab instances under [`/help`](index.md#gitlab-help). Hard-coded HTML is valid, although it's discouraged to be used while we have `/help`. HTML is permitted as long as: @@ -1149,7 +1154,7 @@ keyword "only": - For GitLab Premium: `**(PREMIUM ONLY)**`. - For GitLab Ultimate: `**(ULTIMATE ONLY)**`. 
-For GitLab.com only tiers (when the feature is not available for self-hosted instances): +For GitLab.com only tiers (when the feature is not available for self-managed instances): - For GitLab Free and higher tiers: `**(FREE ONLY)**`. - For GitLab Bronze and higher tiers: `**(BRONZE ONLY)**`. diff --git a/doc/development/documentation/workflow.md b/doc/development/documentation/workflow.md index 7c97f6628c999f0dbad9a35fdfc3fb8fec43af1c..1b8d2ee434a46581b4b48576faeeea5739a1a29b 100644 --- a/doc/development/documentation/workflow.md +++ b/doc/development/documentation/workflow.md @@ -118,7 +118,7 @@ Reviewers help ensure: Prior to merging, documentation changes committed by the developer must be reviewed by: - The code reviewer for the merge request. This is known as a technical review. -- Optionally, others involved in the work, such as other developers or the Product Manager. +- Optionally, others involved in the work such as other developers or the Product Manager. - The Technical Writer for the DevOps stage group, except in exceptional circumstances where a [post-merge review](#post-merge-reviews) can be requested. - A maintainer of the project. @@ -137,11 +137,11 @@ For issues requiring any new or updated documentation, the Product Manager must: - Confirm or add the [documentation requirements](#documentation-requirements). - Ensure the issue contains: - Any new or updated feature name. - - Overview, description, and use cases, as required by the - [documentation structure and template](structure.md), when applicable. + - Overview, description, and use cases when applicable (as required by the + [documentation structure and template](structure.md)). -Everyone is encouraged to draft the documentation requirements in the issue, but a Product Manager -will do the following: +Everyone is encouraged to draft the documentation requirements in the issue. 
However, a Product +Manager will: - When the issue is assigned a release milestone, review and update the Documentation details. - By the kickoff, finalize the documentation details. @@ -238,7 +238,7 @@ The following details should be included: - What concepts and procedures should the documentation guide and enable the user to understand or accomplish? - To this end, what new page(s) are needed, if any? What pages or subsections need updates? - Consider user, admin, and API documentation changes and additions. + Consider changes and additions to user, admin, and API documentation. - For any guide or instruction set, should it help address a single use case, or be flexible to address a certain range of use cases? - Do we need to update a previously recommended workflow? Should we link the new feature from diff --git a/doc/development/elasticsearch.md b/doc/development/elasticsearch.md index b8d2a873d8b9ce695d3be0646867a8c086c6b7ca..69113fe80308cf6b5c4db162e207037c8dfcd276 100644 --- a/doc/development/elasticsearch.md +++ b/doc/development/elasticsearch.md @@ -36,7 +36,11 @@ Additionally, if you need large repos or multiple forks for testing, please cons The Elasticsearch integration depends on an external indexer. We ship an [indexer written in Go](https://gitlab.com/gitlab-org/gitlab-elasticsearch-indexer). The user must trigger the initial indexing via a rake task but, after this is done, GitLab itself will trigger reindexing when required via `after_` callbacks on create, update, and destroy that are inherited from [/ee/app/models/concerns/elastic/application_versioned_search.rb](https://gitlab.com/gitlab-org/gitlab/blob/master/ee/app/models/concerns/elastic/application_versioned_search.rb). -All indexing after the initial one is done via `ElasticIndexerWorker` (Sidekiq jobs). +After initial indexing is complete, updates proceed in one of two ways, depending on the `:elastic_bulk_incremental_updates` feature flag. 
+ +If disabled, every create, update, or delete operation on an Elasticsearch-tracked model enqueues a new `ElasticIndexerWorker` Sidekiq job which takes care of updating just that document. This is quite inefficient. + +If the feature flag is enabled, create, update, and delete operations for all models except projects (see [#207494](https://gitlab.com/gitlab-org/gitlab/issues/207494)) are tracked in a Redis [`ZSET`](https://redis.io/topics/data-types#sorted-sets) instead. A regular `sidekiq-cron` `ElasticIndexBulkCronWorker` processes this queue, updating many Elasticsearch documents at a time with the [Bulk Request API](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html). Search queries are generated by the concerns found in [ee/app/models/concerns/elastic](https://gitlab.com/gitlab-org/gitlab/tree/master/ee/app/models/concerns/elastic). These concerns are also in charge of access control, and have been a historic source of security bugs so please pay close attention to them! diff --git a/doc/development/event_tracking/index.md b/doc/development/event_tracking/index.md index 10e99e845d69895146679e398d0781a33a233af7..8c00930a7818b6ab2a7aa93a4caeca1fa9233797 100644 --- a/doc/development/event_tracking/index.md +++ b/doc/development/event_tracking/index.md @@ -13,7 +13,7 @@ As developers, we should attempt to add tracking and instrumentation where possi - Usage patterns. - Other metrics that can potentially be improved on. -To maintain consistency, and not adversely effect performance, we have some basic tracking functionality exposed at both the frontend and backend layers that can be utilized while building new features or updating existing features. +To maintain consistency and not adversely affect performance, we have some basic tracking functionality exposed at both the frontend and backend layers that can be utilized while building new features or updating existing features. 
We also encourage users to enable tracking, and we embrace full transparency with our tracking approach so it can be easily understood and trusted. By enabling tracking, users can: diff --git a/doc/development/fe_guide/development_process.md b/doc/development/fe_guide/development_process.md index 5b02098f020381c071b18dd1c5de69e0c9808bde..64bc01c181cc31cef99756a7a56a138c5848fc4a 100644 --- a/doc/development/fe_guide/development_process.md +++ b/doc/development/fe_guide/development_process.md @@ -71,7 +71,7 @@ With the purpose of being [respectful of others' time](https://about.gitlab.com/ - includes tests - includes a changelog entry (when necessary) - Before assigning to a maintainer, assign to a reviewer. -- If you assigned a merge request, or pinged someone directly, keep in mind that we work in different timezones and asynchronously, so be patient. Unless the merge request is urgent (like fixing a broken master), please don't DM or reassign the merge request before waiting for a 24-hour window. +- If you assigned a merge request or pinged someone directly, be patient because we work in different timezones and asynchronously. Unless the merge request is urgent (like fixing a broken master), please don't DM or reassign the merge request before waiting for a 24-hour window. - If you have a question regarding your merge request/issue, make it on the merge request/issue. When we DM each other, we no longer have a SSOT and [no one else is able to contribute](https://about.gitlab.com/handbook/values/#public-by-default). - When you have a big WIP merge request with many changes, you're advised to get the review started before adding/removing significant code. Make sure it is assigned well before the release cut-off, as the reviewer(s)/maintainer(s) would always prioritize reviewing finished MRs before WIP ones. - Make sure to remove the WIP title before the last round of review. 
diff --git a/doc/development/fe_guide/graphql.md b/doc/development/fe_guide/graphql.md index a9821edff0b929e9aaeacf8555e0d85286a15a9d..d21c937bfe4eb6e866899c5feb2ca847e5ebd631 100644 --- a/doc/development/fe_guide/graphql.md +++ b/doc/development/fe_guide/graphql.md @@ -53,7 +53,7 @@ fragment DesignListItem on Design { } ``` -Fragments can be stored in separate files, imported and used in queries, mutations or other fragments. +Fragments can be stored in separate files, imported and used in queries, mutations, or other fragments. ```javascript #import "./designList.fragment.graphql" diff --git a/doc/development/fe_guide/vuex.md b/doc/development/fe_guide/vuex.md index 0bb9e3b7d504719a1dc4de70013d21091aef71c1..cd18091abdfa4bf4ead47e9c70e155b0ab959508 100644 --- a/doc/development/fe_guide/vuex.md +++ b/doc/development/fe_guide/vuex.md @@ -6,7 +6,7 @@ _Note:_ All of the below is explained in more detail in the official [Vuex docum ## Separation of concerns -Vuex is composed of State, Getters, Mutations, Actions and Modules. +Vuex is composed of State, Getters, Mutations, Actions, and Modules. When a user clicks on an action, we need to `dispatch` it. This action will `commit` a mutation that will change the state. _Note:_ The action itself will not update the state, only a mutation should update the state. diff --git a/doc/development/feature_flags/process.md b/doc/development/feature_flags/process.md index 4b44c8dadca68780e80a0b81f7b33b0cffeba25e..0cca4117f1f7cb5f4ec1528f0f948023282cc81e 100644 --- a/doc/development/feature_flags/process.md +++ b/doc/development/feature_flags/process.md @@ -53,7 +53,7 @@ absolutely no way to use the feature until it is enabled. ### Including a feature behind feature flag in the final release -In order to build a final release and present the feature for self-hosted +In order to build a final release and present the feature for self-managed users, the feature flag should be at least defaulted to **on**. 
If the feature is deemed stable and there is confidence that removing the feature flag is safe, consider removing the feature flag altogether. @@ -126,8 +126,11 @@ need to revert a release, and because feature flags are disabled by default we don't need to revert and pick any Git commits. In fact, all we have to do is disable the feature, and in the worst case, perform cleanup. Let's say that the cost of this is 2. In this case, our best case cost is 11: 10 to build the -feature, and 1 to add the feature flag. The worst case cost is now 13: 10 to -build the feature, 1 to add the feature flag, and 2 to disable and clean up. +feature, and 1 to add the feature flag. The worst case cost is now 13: + +- 10 to build the feature. +- 1 to add the feature flag. +- 2 to disable and clean up. Here we can see that in the best case scenario the work necessary is only a tiny bit more compared to not using a feature flag. Meanwhile, the process of diff --git a/doc/development/new_fe_guide/development/accessibility.md b/doc/development/new_fe_guide/development/accessibility.md index ae5c4c6a6cc39b358de8c1da514fec3e107ab3bd..7a15e9eb6be53775466133baa8545198f9a5aed5 100644 --- a/doc/development/new_fe_guide/development/accessibility.md +++ b/doc/development/new_fe_guide/development/accessibility.md @@ -4,7 +4,7 @@ Using semantic HTML plays a key role when it comes to accessibility. ## Accessible Rich Internet Applications - ARIA -WAI-ARIA, the Accessible Rich Internet Applications specification, defines a way to make Web content and Web applications more accessible to people with disabilities. +WAI-ARIA (the Accessible Rich Internet Applications specification) defines a way to make Web content and Web applications more accessible to people with disabilities. > Note: It is [recommended][using-aria] to use semantic elements as the primary method to achieve accessibility rather than adding aria attributes. 
Adding aria attributes should be seen as a secondary method for creating accessible elements. diff --git a/doc/development/new_fe_guide/index.md b/doc/development/new_fe_guide/index.md index 152ddcdae645f9afebb35017b611a29fc8d99bc3..9e9c367807fae4fc2ad1f989a873e6631e9b5c0d 100644 --- a/doc/development/new_fe_guide/index.md +++ b/doc/development/new_fe_guide/index.md @@ -1,7 +1,7 @@ # Frontend Development Guidelines This guide contains all the information to successfully contribute to GitLab's frontend. -This is a living document, and we welcome contributions, feedback and suggestions. +This is a living document, and we welcome contributions, feedback, and suggestions. ## [Development](development/index.md) diff --git a/doc/development/packages.md b/doc/development/packages.md index 487d1243c9797712d28a7f4f90a8c586c2dac053..848693d368ac5000c052df76af4f83f458346048 100644 --- a/doc/development/packages.md +++ b/doc/development/packages.md @@ -75,8 +75,8 @@ that gives a way to identify the project that the package belongs to. This gener id or full project path in the package name. See [Conan's naming convention](../user/packages/conan_repository/index.md#package-recipe-naming-convention) as an example. -For group and project-level endpoints, naming can be less constrained, and it will be up to the group and project -members to be certain that there is no conflict between two package names, however the system should prevent +For group and project-level endpoints, naming can be less constrained and it will be up to the group and project +members to be certain that there is no conflict between two package names. However, the system should prevent a user from reusing an existing name within a given scope. 
Otherwise, naming should follow the package manager's naming conventions and include a validation in the `package.md` diff --git a/doc/development/performance.md b/doc/development/performance.md index 1b3c4aedf1f60b270fa347599b1d31eea98dd151..5697f41c3dc7e7ddb3f09b25866a784016516862 100644 --- a/doc/development/performance.md +++ b/doc/development/performance.md @@ -259,10 +259,10 @@ One of the reasons of the increased memory footprint could be Ruby memory fragme To diagnose it, you can visualize Ruby heap as described in [this post by Aaron Patterson](https://tenderlovemaking.com/2017/09/27/visualizing-your-ruby-heap.html). -To start, you want to dump the heap of the process you're investigating to a JSON file. +To start, you want to dump the heap of the process you're investigating to a JSON file. -You need to run the command inside the process you're exploring, you may do that with `rbtrace`. -`rbtrace` is already present in GitLab `Gemfile`, you just need to require it. +You need to run the command inside the process you're exploring, you may do that with `rbtrace`. +`rbtrace` is already present in GitLab `Gemfile`, you just need to require it. It could be achieved running webserver or Sidekiq with the environment variable set to `ENABLE_RBTRACE=1`. To get the heap dump: @@ -281,7 +281,7 @@ Fragmented Ruby heap snapshot could look like this:  -Memory fragmentation could be reduced by tuning GC parameters as described in [this post by Nate Berkopec](https://www.speedshop.co/2017/12/04/malloc-doubles-ruby-memory.html), which should be considered as a tradeoff, as it may affect overall performance of memory allocation and GC cycles. +Memory fragmentation could be reduced by tuning GC parameters as described in [this post by Nate Berkopec](https://www.speedshop.co/2017/12/04/malloc-doubles-ruby-memory.html). This should be considered as a tradeoff, as it may affect overall performance of memory allocation and GC cycles. 
## Importance of Changes diff --git a/doc/development/redis.md b/doc/development/redis.md index a4a87155f5aa6b2d1e282f9509f89bc71c8d3798..a8b7b84bb658c5e03f50481af554aad2cab5d583 100644 --- a/doc/development/redis.md +++ b/doc/development/redis.md @@ -8,7 +8,7 @@ GitLab uses [Redis](https://redis.io) for three distinct purposes: Every application process is configured to use the same Redis servers, so they can be used for inter-process communication in cases where [PostgreSQL](sql.md) -is less appropriate, for example, transient state or data that is written much +is less appropriate. For example, transient state or data that is written much more often than it is read. If [Geo](geo.md) is enabled, each Geo node gets its own, independent Redis diff --git a/doc/development/sidekiq_style_guide.md b/doc/development/sidekiq_style_guide.md index 4445efa823a518cb75febc391b2600ac971d7ae2..e15daab0fdb3b4d8467fdefe94c497f58bf4aaf5 100644 --- a/doc/development/sidekiq_style_guide.md +++ b/doc/development/sidekiq_style_guide.md @@ -66,7 +66,7 @@ are not adjusted appropriately. ## Idempotent Jobs -It's known that a job can fail for multiple reasons, for example, network outages or bugs. +It's known that a job can fail for multiple reasons. For example, network outages or bugs. In order to address this, Sidekiq has a built-in retry mechanism that is used by default by most workers within GitLab. @@ -178,7 +178,7 @@ end ## Jobs with External Dependencies Most background jobs in the GitLab application communicate with other GitLab -services, eg Postgres, Redis, Gitaly and Object Storage. These are considered +services. For example, Postgres, Redis, Gitaly, and Object Storage. These are considered to be "internal" dependencies for a job. However, some jobs will be dependent on external services in order to complete @@ -388,7 +388,7 @@ requests. We do this to avoid incorrect metadata when other jobs are scheduled from the cron-worker. 
Cron-Workers themselves run instance wide, so they aren't scoped to -users, namespaces, projects or other resources that should be added to +users, namespaces, projects, or other resources that should be added to the context. However, they often schedule other jobs that _do_ require context. diff --git a/doc/development/testing_guide/end_to_end/quick_start_guide.md b/doc/development/testing_guide/end_to_end/quick_start_guide.md index be00129a2bc4793355803b2f0ce8e7ad2642b458..5d5715df37267768a26a41b9d53a0b10f8fd1a94 100644 --- a/doc/development/testing_guide/end_to_end/quick_start_guide.md +++ b/doc/development/testing_guide/end_to_end/quick_start_guide.md @@ -2,7 +2,12 @@ In this tutorial, you will find different examples, and the steps involved, in the creation of end-to-end (_e2e_) tests for GitLab CE and GitLab EE, using GitLab QA. -> When referring to end-to-end tests in this document, this means testing a specific feature end-to-end, such as a user logging in, the creation of a project, the management of labels, breaking down epics into sub-epics and issues, etc. +When referring to end-to-end tests in this document, this means testing a specific feature end-to-end such as: + +- A user logging in. +- The creation of a project. +- The management of labels. +- Breaking down epics into sub-epics and issues. ## Important information before we start writing tests @@ -209,7 +214,11 @@ First, we remove the duplication of strings by defining the global variables `@i Then, by creating a reusable `select_label_and_refresh` method, we remove the code duplication of this action, and later we can move this method to a Page Object class that will be created for easier maintenance purposes. -> Notice that the reusable method is created at the bottom of the file. 
The reason for that is that reading the code should be similar to reading a newspaper, where high-level information is at the top, like the title and summary of the news, while low level, or more specific information, is at the bottom (this helps readability). +Notice that the reusable method is created at the bottom of the file. This helps readability, +where reading the code should be similar to reading a newspaper: + +- High-level information is at the top, like the title and summary of the news. +- Low level, or more specific information, is at the bottom. ### 5. Tests' pre-conditions using resources and Page Objects @@ -353,7 +362,7 @@ You can think of [Resources] as anything that can be created on GitLab CE or EE, With that in mind, resources can be a project, an epic, an issue, a label, a commit, etc. -As you saw in the tests' pre-conditions and the optimization sections, we're already creating some of these resources, and we are doing that by calling the `fabricate_via_api!` method. +As you saw in the tests' pre-conditions and the optimization sections, we're already creating some of these resources. We are doing that by calling the `fabricate_via_api!` method. > We could be using the `fabricate!` method instead, which would use the `fabricate_via_api!` method if it exists, and fallback to GUI fabrication otherwise, but we recommend being explicit to make it clear what the test does. Also, we always recommend fabricating resources via API since this makes tests faster and more reliable. diff --git a/doc/development/testing_guide/testing_levels.md b/doc/development/testing_guide/testing_levels.md index f7dec82724f557309f63682a1f225b3d71372cff..58f00829b803d12d5c60b05d6bffb2708daf8e8c 100644 --- a/doc/development/testing_guide/testing_levels.md +++ b/doc/development/testing_guide/testing_levels.md @@ -103,7 +103,7 @@ graph RL For complex Vuex mutations, you should separate the tests from other parts of the Vuex store to simplify problem-solving. 
#### When *not* to use unit tests - + - **Non-exported functions or classes**: Anything not exported from a module can be considered private or an implementation detail, and doesn't need to be tested. - **Constants**: @@ -200,7 +200,7 @@ graph RL - **All server requests**: Similar to unit tests, when running component tests, the backend may not be reachable, so all outgoing requests need to be mocked. - **Asynchronous background operations**: - Similar to unit tests, background operations cannot be stopped or waited on, so they will continue running in the following tests and cause side effects. + Similar to unit tests, background operations cannot be stopped or waited on. This means they will continue running in the following tests and cause side effects. - **Child components**: Every component is tested individually, so child components are mocked. See also [`shallowMount()`](https://vue-test-utils.vuejs.org/api/#shallowmount) diff --git a/doc/integration/elasticsearch.md b/doc/integration/elasticsearch.md index 9ec56d304e0153d9ec17a82b39b431a165591d8d..c2f4fff0ce36056f0bdb90841850879e4892b5dc 100644 --- a/doc/integration/elasticsearch.md +++ b/doc/integration/elasticsearch.md @@ -260,7 +260,7 @@ If the database size is less than 500 MiB, and the size of all hosted repos is l CAUTION: **Warning**: Performing asynchronous indexing will generate a lot of Sidekiq jobs. -Make sure to prepare for this task by either [Horizontally Scaling](../administration/high_availability/README.md#basic-scaling) +Make sure to prepare for this task by having a [Scalable and Highly Available Setup](README.md) or creating [extra Sidekiq processes](../administration/operations/extra_sidekiq_processes.md) 1. [Configure your Elasticsearch host and port](#enabling-elasticsearch). 
diff --git a/doc/integration/omniauth.md b/doc/integration/omniauth.md index c75a2a90c2ace13dd6290551688ada7e9349466c..8eef5ff26cf874edd5234680570092405121515a 100644 --- a/doc/integration/omniauth.md +++ b/doc/integration/omniauth.md @@ -51,7 +51,7 @@ that are in common for all providers that we need to consider. be created manually or they will not be able to sign in via OmniAuth. - `auto_link_ldap_user` can be used if you have [LDAP / ActiveDirectory](ldap.md) integration enabled. It defaults to false. When enabled, users automatically - created through OmniAuth will be linked to their LDAP entry as well. + created through an OmniAuth provider will have their LDAP identity created in GitLab as well. - `block_auto_created_users` defaults to `true`. If `true` auto created users will be blocked by default and will have to be unblocked by an administrator before they are able to sign in. diff --git a/doc/integration/saml.md b/doc/integration/saml.md index 31d41433e6be6531548e46b2ddef93a56c20eb44..001e2883de0f0707fc6d7708d1d2f3d62acceaf3 100644 --- a/doc/integration/saml.md +++ b/doc/integration/saml.md @@ -188,7 +188,7 @@ tell GitLab which groups are external via the `external_groups:` element: } } ``` -## Required groups +## Required groups **(STARTER ONLY)** >**Note:** This setting is only available on GitLab 10.2 EE and above. @@ -215,7 +215,7 @@ Example: } } ``` -## Admin Groups +## Admin Groups **(STARTER ONLY)** >**Note:** This setting is only available on GitLab 8.8 EE and above. @@ -239,7 +239,7 @@ considered `admin groups`. } } ``` -## Auditor Groups +## Auditor Groups **(STARTER ONLY)** >**Note:** This setting is only available on GitLab 11.4 EE and above. 
diff --git a/doc/user/markdown.md b/doc/user/markdown.md index 7ad810317f0f9c39996c1f6243284c941c1bed12..c8484380127e7147901b465eaf19d358b517c597 100644 --- a/doc/user/markdown.md +++ b/doc/user/markdown.md @@ -131,26 +131,26 @@ Supported formats (named colors are not supported): Color written inside backticks will be followed by a color "chip": ```markdown -`#F00` -`#F00A` -`#FF0000` -`#FF0000AA` -`RGB(0,255,0)` -`RGB(0%,100%,0%)` -`RGBA(0,255,0,0.3)` -`HSL(540,70%,50%)` -`HSLA(540,70%,50%,0.3)` -``` - -`#F00` -`#F00A` -`#FF0000` -`#FF0000AA` -`RGB(0,255,0)` -`RGB(0%,100%,0%)` -`RGBA(0,255,0,0.3)` -`HSL(540,70%,50%)` -`HSLA(540,70%,50%,0.3)` +- `#F00` +- `#F00A` +- `#FF0000` +- `#FF0000AA` +- `RGB(0,255,0)` +- `RGB(0%,100%,0%)` +- `RGBA(0,255,0,0.3)` +- `HSL(540,70%,50%)` +- `HSLA(540,70%,50%,0.3)` +``` + +- `#F00` +- `#F00A` +- `#FF0000` +- `#FF0000AA` +- `RGB(0,255,0)` +- `RGB(0%,100%,0%)` +- `RGBA(0,255,0,0.3)` +- `HSL(540,70%,50%)` +- `HSLA(540,70%,50%,0.3)` ### Diagrams and flowcharts @@ -390,7 +390,7 @@ the [asciidoctor user manual](https://asciidoctor.org/docs/user-manual/#activati ### Special GitLab references GFM recognizes special GitLab related references. For example, you can easily reference -an issue, a commit, a team member or even the whole team within a project. GFM will turn +an issue, a commit, a team member, or even the whole team within a project. GFM will turn that reference into a link so you can navigate between them easily. Additionally, GFM recognizes certain cross-project references, and also has a shorthand @@ -581,7 +581,7 @@ Quote break. GFM extends the standard Markdown standard by also supporting multiline blockquotes fenced by `>>>`: -``` +```markdown >>> If you paste a message from somewhere else @@ -630,7 +630,7 @@ def function(): 3-backtick fences. ~~~ -``` +```plaintext ~~~ Tildes are OK too. ~~~ @@ -638,20 +638,20 @@ Tildes are OK too. 
The three examples above render as: -``` +```python def function(): #indenting works just fine in the fenced code block s = "Python code" print s ``` -``` +```plaintext Using 4 spaces is like using 3-backtick fences. ``` -~~~ +~~~plaintext Tildes are OK too. ~~~ @@ -668,7 +668,7 @@ code when it is inline. Blocks of code are fenced by lines with three back-ticks ```` ``` ```` or three tildes `~~~`, and have the language identified at the end of the first fence: -~~~ +~~~markdown ```javascript var s = "JavaScript syntax highlighting"; alert(s); @@ -714,7 +714,7 @@ markdown = Redcarpet.new("Hello World!") puts markdown.to_html ``` -``` +```plaintext No language indicated, so no syntax highlighting. s = "There is no highlighting for this." But let's throw in a <b>tag</b>. @@ -756,7 +756,7 @@ dealing with code and names that often appear with multiple underscores. As a re GFM extends the standard Markdown standard by ignoring multiple underlines in words, to allow better rendering of Markdown documents discussing code: -```md +```markdown perform_complicated_task do_this_and_do_that_and_another_thing @@ -852,7 +852,7 @@ The IDs are generated from the content of the header according to the following Example: -``` +```markdown # This header has spaces in it ## This header has a :thumbsup: in it # This header has Unicode in it: ՜글 @@ -973,7 +973,7 @@ class for the list of allowed HTML tags and attributes. In addition to the defau <dd>Is something people use sometimes.</dd> <dt>Markdown in HTML</dt> - <dd>Does *not* work **very** well. HTML <em>tags</em> will <b>always</b> work.</dd> + <dd>Does *not* work **very** well. HTML <em>tags</em> will <b>work</b>, in most cases.</dd> </dl> ``` @@ -982,7 +982,7 @@ class for the list of allowed HTML tags and attributes. In addition to the defau <dd>Is something people use sometimes.</dd> <dt>Markdown in HTML</dt> - <dd>Does *not* work **very** well. HTML <em>tags</em> will <b>always</b> work.</dd> + <dd>Does *not* work **very** well. 
HTML <em>tags</em> will <b>work</b>, in most cases.</dd> </dl> --- @@ -993,12 +993,12 @@ are separated into their own lines: ```html <dl> <dt>Markdown in HTML</dt> - <dd>Does *not* work **very** well. HTML tags will always work.</dd> + <dd>Does *not* work **very** well. HTML tags will work, in most cases.</dd> <dt>Markdown in HTML</dt> <dd> - Does *not* work **very** well. HTML tags will always work. + Does *not* work **very** well. HTML tags will work, in most cases. </dd> </dl> @@ -1008,12 +1008,12 @@ are separated into their own lines: <dl> <dt>Markdown in HTML</dt> - <dd>Does *not* work **very** well. HTML tags will always work.</dd> + <dd>Does *not* work **very** well. HTML tags will work, in most cases.</dd> <dt>Markdown in HTML</dt> <dd> - Does <em>not</em> work <b>very</b> well. HTML tags will always work. + Does <em>not</em> work <b>very</b> well. HTML tags will work, in most cases. </dd> </dl> @@ -1148,7 +1148,7 @@ A new line due to the previous backslash. There are two ways to create links, inline-style and reference-style: -```md +```markdown - This is an [inline-style link](https://www.google.com) - This is a [link to a repository file in the same directory](index.md) - This is a [relative link to a readme one directory higher](../README.md) @@ -1319,7 +1319,7 @@ the paragraph will appear outside the list, instead of properly indented under t Example: -``` +```markdown 1. First ordered list item Paragraph of first item. 
diff --git a/doc/user/packages/conan_repository/index.md b/doc/user/packages/conan_repository/index.md index c21e539f332a2e622e2c42e0d9f5aad4a6ad3886..522d6652e7a732383d5b74b2281d2cc08a6d8d64 100644 --- a/doc/user/packages/conan_repository/index.md +++ b/doc/user/packages/conan_repository/index.md @@ -39,7 +39,7 @@ conan --version You should see the Conan version printed in the output: -``` +```plaintext Conan version 1.20.5 ``` diff --git a/doc/user/packages/npm_registry/index.md b/doc/user/packages/npm_registry/index.md index 5a3754685dad8785482bee82a3325dbe70a6b014..af848358a4da8005ec9b11f13b4ea503200f0f65 100644 --- a/doc/user/packages/npm_registry/index.md +++ b/doc/user/packages/npm_registry/index.md @@ -49,7 +49,7 @@ npm --version You should see the NPM version printed in the output: -``` +```plaintext 6.10.3 ``` @@ -67,7 +67,7 @@ yarn --version You should see the version printed like so: -``` +```plaintext 1.19.1 ``` diff --git a/doc/user/packages/nuget_repository/index.md b/doc/user/packages/nuget_repository/index.md index 5d3fdf535d220a03556008f35ffd9512ae4fdc26..dd614c2b38e6685b6e02c296e62ea1f7f11cb00d 100644 --- a/doc/user/packages/nuget_repository/index.md +++ b/doc/user/packages/nuget_repository/index.md @@ -26,7 +26,7 @@ nuget help You should see something similar to: -``` +```plaintext NuGet Version: 5.2.0.6090 usage: NuGet <command> [args] [options] Type 'NuGet help <command>' for help on a specific command. diff --git a/doc/user/profile/account/two_factor_authentication.md b/doc/user/profile/account/two_factor_authentication.md index ab701337c2a9abaab5008e8e25e483706a11361a..72ef8de7a3cdd61c127cc4b89564a0c0d29a82ce 100644 --- a/doc/user/profile/account/two_factor_authentication.md +++ b/doc/user/profile/account/two_factor_authentication.md @@ -160,6 +160,7 @@ have lost your code generation device) you can: - [Use a saved recovery code](#use-a-saved-recovery-code). - [Generate new recovery codes using SSH](#generate-new-recovery-codes-using-ssh). 
+- [Regenerate 2FA recovery codes](#regenerate-2fa-recovery-codes). - [Ask a GitLab administrator to disable two-factor authentication on your account](#ask-a-gitlab-administrator-to-disable-two-factor-authentication-on-your-account). ### Use a saved recovery code @@ -219,6 +220,20 @@ a new set of recovery codes with SSH: After signing in, visit your **Profile settings > Account** immediately to set up two-factor authentication with a new device. +### Regenerate 2FA recovery codes + +To regenerate 2FA recovery codes, you need access to a desktop browser: + +1. Navigate to GitLab. +1. Sign in to your GitLab account. +1. Go to your [Profile settings](../index.md#profile-settings). +1. Select **{account}** **Account > Two-Factor Authentication (2FA)**. +1. If you've already configured 2FA, click **Manage two-factor authentication**. +1. In the **Register Two-Factor Authenticator** pane, click **Regenerate recovery codes**. + +NOTE: **Note:** +If you regenerate 2FA recovery codes, save them. You won't be able to use any previously created 2FA codes. + ### Ask a GitLab administrator to disable two-factor authentication on your account If you cannot use a saved recovery code or generate new recovery codes, ask a diff --git a/doc/user/project/clusters/add_remove_clusters.md b/doc/user/project/clusters/add_remove_clusters.md index 6106c86ce3941e7a93bfdfe62d85ed3e923aabe3..3e1e1694f0dc862cfc435481795a5084515544a2 100644 --- a/doc/user/project/clusters/add_remove_clusters.md +++ b/doc/user/project/clusters/add_remove_clusters.md @@ -5,7 +5,7 @@ GitLab offers integrated cluster creation for the following Kubernetes providers - Google Kubernetes Engine (GKE). - Amazon Elastic Kubernetes Service (EKS). -In addition, GitLab can integrate with any standard Kubernetes provider, either on-premise or hosted. +GitLab can also integrate with any standard Kubernetes provider, either on-premise or hosted. 
TIP: **Tip:** Every new Google Cloud Platform (GCP) account receives [$300 in credit upon sign up](https://console.cloud.google.com/freetrial), diff --git a/doc/user/project/deploy_tokens/index.md b/doc/user/project/deploy_tokens/index.md index 728f09ca787c12e260d2692bce5058d23238ce39..4479653417c509f2acd4e14a0192f2d3028b58b3 100644 --- a/doc/user/project/deploy_tokens/index.md +++ b/doc/user/project/deploy_tokens/index.md @@ -9,11 +9,11 @@ at midnight UTC and that they can be only managed by [maintainers](../../permiss ## Creating a Deploy Token -You can create as many deploy tokens as you like from the settings of your project: +You can create as many deploy tokens as you like from the settings of your project. Alternatively, you can also create [group-scoped deploy tokens](#group-deploy-token). 1. Log in to your GitLab account. -1. Go to the project you want to create Deploy Tokens for. -1. Go to **Settings** > **Repository**. +1. Go to the project (or group) you want to create Deploy Tokens for. +1. Go to **{settings}** **Settings** > **CI / CD**. 1. Click on "Expand" on **Deploy Tokens** section. 1. Choose a name, expiry date (optional), and username (optional) for the token. 1. Choose the [desired scopes](#limiting-scopes-of-a-deploy-token). @@ -77,6 +77,22 @@ docker login -u <username> -p <deploy_token> registry.example.com Just replace `<username>` and `<deploy_token>` with the proper values. Then you can simply pull images from your Container Registry. +### Group Deploy Token + +> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/21765) in GitLab 12.9. + +A deploy token created at the group level can be used across all projects that +belong either to the specific group or to one of its subgroups. + +To use a group deploy token: + +1. [Create](#creating-a-deploy-token) a deploy token for a group. +1. Use it the same way you use a project deploy token when + [cloning a repository](#git-clone-a-repository). 
+ +The scopes applied to a group deploy token (such as `read_repository`) will +apply consistently when cloning the repository of related projects. + ### GitLab Deploy Token > [Introduced][ce-18414] in GitLab 10.8. diff --git a/doc/user/project/description_templates.md b/doc/user/project/description_templates.md index d59d4eec1743d8ca37a5decbd6c44b5245bf703f..84b74692725cce361da88be7ae42ddb72acf8d07 100644 --- a/doc/user/project/description_templates.md +++ b/doc/user/project/description_templates.md @@ -91,7 +91,7 @@ It is possible to use [quick actions](quick_actions.md) within description templ Here is an example for a Bug report template: -``` +```plaintext Summary (Summarize the bug encountered concisely) diff --git a/doc/user/project/import/github.md b/doc/user/project/import/github.md index 175110cd535acec9e37f860ce7c570864bd523df..a475dde59fd877798c7e34b26fa3c3681f4b174b 100644 --- a/doc/user/project/import/github.md +++ b/doc/user/project/import/github.md @@ -40,10 +40,13 @@ in which case it defaults to the default project visibility. When issues and pull requests are being imported, the importer attempts to find their GitHub authors and assignees in the database of the GitLab instance (note that pull requests are called "merge requests" in GitLab). -For this association to succeed, prior to the import, each GitHub author and assignee in the repository must -have either previously logged in to a GitLab account using the GitHub icon **or** have a GitHub account with -a [primary email address](https://help.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address) that -matches their GitLab account's email address. +For this association to succeed, each GitHub author and assignee in the repository +must meet one of the following conditions prior to the import: + +- Have previously logged in to a GitLab account using the GitHub icon. 
+- Have a GitHub account with a + [primary email address](https://help.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address) + that matches their GitLab account's email address. If a user referenced in the project is not found in GitLab's database, the project creator (typically the user that initiated the import process) is set as the author/assignee, but a note on the issue mentioning the original diff --git a/doc/user/project/import/index.md b/doc/user/project/import/index.md index 8fd4325b5cd762740099faa831db5074954013b6..ab2f942e911f50432be7a4777d8d67f1a4f43b2f 100644 --- a/doc/user/project/import/index.md +++ b/doc/user/project/import/index.md @@ -24,7 +24,7 @@ There is also the option of [connecting your external repository to get CI/CD be ## Migrating from self-hosted GitLab to GitLab.com -If you only need to migrate Git repos, you can [import each project by URL](repo_by_url.md), but issues and merge requests can't be imported. +If you only need to migrate Git repos, you can [import each project by URL](repo_by_url.md). Issues and merge requests can't be imported. If you want to retain all metadata like issues and merge requests, you can use the [import/export feature](../settings/import_export.md) to export projects from self-hosted GitLab and import those projects into GitLab.com. diff --git a/doc/user/project/index.md b/doc/user/project/index.md index 87837d50bbe8963b171b0345051879c1bb60d763..661c1eebf6d06221b07a1247676b5d5f991acd09 100644 --- a/doc/user/project/index.md +++ b/doc/user/project/index.md @@ -94,7 +94,7 @@ When you create a project in GitLab, you'll have access to a large number of your code blocks, overriding GitLab's default choice of language. - [Badges](badges.md): badges for the project overview. 
- [Releases](releases/index.md): a way to track deliverables in your project as snapshot in time of - the source, build output, and other metadata or artifacts + the source, build output, other metadata, and other artifacts associated with a released version of your code. - [Conan packages](../packages/conan_repository/index.md): your private Conan repository in GitLab. **(PREMIUM)** - [Maven packages](../packages/maven_repository/index.md): your private Maven repository in GitLab. **(PREMIUM)** diff --git a/doc/user/project/integrations/gitlab_slack_application.md b/doc/user/project/integrations/gitlab_slack_application.md index 72b1318a16ab22ae3ffe07c2fbe6563d8ec87b87..4bc44d1d7d854f5702a0c5be9426282fa9b768aa 100644 --- a/doc/user/project/integrations/gitlab_slack_application.md +++ b/doc/user/project/integrations/gitlab_slack_application.md @@ -56,7 +56,7 @@ We are working on making this configurable in the future. For example, to show the issue number `1001` under the `gitlab-org/gitlab` project, you would do: -``` +```plaintext /gitlab gitlab-org/gitlab issue show 1001 ``` diff --git a/doc/user/project/integrations/hipchat.md b/doc/user/project/integrations/hipchat.md index 85c3eda1208d5e5412144eea0d2ff57597dfbf7e..347f7973c84d250d579009ff942676b1dae8e24f 100644 --- a/doc/user/project/integrations/hipchat.md +++ b/doc/user/project/integrations/hipchat.md @@ -25,7 +25,7 @@ allow GitLab to send messages only to *one* room. 1. 
In the "Send messages to this room by posting this URL" column, you should see a URL in the format: -``` +```plaintext https://api.hipchat.com/v2/room/<room>/notification?auth_token=<token> ``` diff --git a/doc/user/project/integrations/irker.md b/doc/user/project/integrations/irker.md index 47017843233cc7c97c104c5c42b0139a559e2b12..cadf01c382a071d3ac7696ff9e70cd047a257be4 100644 --- a/doc/user/project/integrations/irker.md +++ b/doc/user/project/integrations/irker.md @@ -11,7 +11,7 @@ See the project homepage for further info: <https://gitlab.com/esr/irker> You will first need an Irker daemon. You can download the Irker code from its repository on <https://gitlab.com/esr/irker>: -``` +```shell git clone https://gitlab.com/esr/irker.git ``` diff --git a/doc/user/project/integrations/jira.md b/doc/user/project/integrations/jira.md index 1af56b79e822c3877af28b91eaaafb2e04dfd64a..76b1d1876981c816405b896d48385016f8b38528 100644 --- a/doc/user/project/integrations/jira.md +++ b/doc/user/project/integrations/jira.md @@ -45,11 +45,11 @@ In order to enable the Jira service in GitLab, you need to first configure the p #### Jira Server -When connecting to **Jira Server**, which supports basic authentication, a **username and password** are required. Note that connecting to Jira Server via CAS is not possible. [Set up a user in Jira Server](jira_server_configuration.md) first and then proceed to [Configuring GitLab](#configuring-gitlab). +**Jira Server** supports basic authentication. When connecting, a **username and password** are required. Note that connecting to Jira Server via CAS is not possible. [Set up a user in Jira Server](jira_server_configuration.md) first and then proceed to [Configuring GitLab](#configuring-gitlab). #### Jira Cloud -When connecting to **Jira Cloud**, which supports authentication via API token, an **email and API token**, are required. 
[Set up a user in Jira Cloud](jira_cloud_configuration.md) first and then proceed to [Configuring GitLab](#configuring-gitlab). +**Jira Cloud** supports authentication through an API token. When connecting to **Jira Cloud**, an **email and API token** are required. [Set up a user in Jira Cloud](jira_cloud_configuration.md) first and then proceed to [Configuring GitLab](#configuring-gitlab). ### Configuring GitLab @@ -119,7 +119,7 @@ link back to GitLab. This means that in comments in merge requests and commits referencing an issue, e.g., `PROJECT-7`, will add a comment in Jira issue in the format: -``` +```plaintext USER mentioned this issue in RESOURCE_NAME of [PROJECT_NAME|LINK_TO_COMMENT]: ENTITY_TITLE ``` diff --git a/doc/user/project/integrations/prometheus_library/kubernetes.md b/doc/user/project/integrations/prometheus_library/kubernetes.md index 7433210b5532d522761dc4a6fecf033bdc762d68..ca1555c793b1d0c4acb02134430ecb3361c3a699 100644 --- a/doc/user/project/integrations/prometheus_library/kubernetes.md +++ b/doc/user/project/integrations/prometheus_library/kubernetes.md @@ -13,13 +13,13 @@ integration services must be enabled. 
- Average Memory Usage (MB): - ``` + ```prometheus avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024 ``` - Average CPU Utilization (%): - ``` + ```prometheus avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name)) ``` @@ -48,12 +48,12 @@ These metrics expect the [Deployment](https://kubernetes.io/docs/concepts/worklo - Average Memory Usage (MB) - ``` + ```prometheus avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024 ``` - Average CPU Utilization (%) - ``` + ```prometheus avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name)) ``` diff --git a/doc/user/project/new_ci_build_permissions_model.md b/doc/user/project/new_ci_build_permissions_model.md index 
d1bb23396e487275d680d54c08fd89033b48401f..09af6d05690ce4aa32c7ef2c6c7f1640a6a9af59 100644 --- a/doc/user/project/new_ci_build_permissions_model.md +++ b/doc/user/project/new_ci_build_permissions_model.md @@ -68,7 +68,7 @@ Let's consider the following scenario: A unique job token is generated for each job and provides the user read access all projects that would be normally accessible to the user creating that job. The unique job token does not have any write permissions, but there -is a [proposal to add support](https://gitlab.com/gitlab-org/gitlab-foss/issues/18106). +is a [proposal to add support](https://gitlab.com/gitlab-org/gitlab/issues/35067). We try to make sure that this token doesn't leak by: @@ -101,14 +101,14 @@ allowing pulling and pushing Docker images from within the CI job. GitLab would create a special checkout URL like: -``` +```plaintext https://gitlab-ci-token:<project-runners-token>/gitlab.com/gitlab-org/gitlab-foss.git ``` And then the users could also use it in their CI jobs all Docker related commands to interact with GitLab Container Registry. For example: -``` +```shell docker login -u gitlab-ci-token -p $CI_JOB_TOKEN registry.gitlab.com ``` @@ -173,14 +173,14 @@ As a user: The [Job environment variable][jobenv] `CI_JOB_TOKEN` can be used to authenticate any clones of dependent repositories. 
For example: -``` +```shell git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.com/<user>/<mydependentrepo>.git ``` It can also be used for system-wide authentication (only do this in a docker container, it will overwrite ~/.netrc): -``` +```shell echo -e "machine gitlab.com\nlogin gitlab-ci-token\npassword ${CI_JOB_TOKEN}" > ~/.netrc ``` diff --git a/doc/user/search/index.md b/doc/user/search/index.md index 70ab9af0bccd40cac4a7dbcbf9538963a5a89899..407578fd4df24b3e1b8c200ddd6dc308fdf5afc1 100644 --- a/doc/user/search/index.md +++ b/doc/user/search/index.md @@ -41,7 +41,7 @@ groups: - [Label](../project/labels.md) - My-reaction - Confidential - - Epic ([Introduced](https://gitlab.com/gitlab-org/gitlab/issues/195704) in GitLab 12.8) + - Epic ([Introduced](https://gitlab.com/gitlab-org/gitlab/issues/195704) in GitLab 12.9) - Search for this text 1. Select or type the operator to use for filtering the attribute. The following operators are available: diff --git a/ee/app/assets/javascripts/analytics/cycle_analytics/components/base.vue b/ee/app/assets/javascripts/analytics/cycle_analytics/components/base.vue index c565d47e748505646ca059b70b51221d8b74780e..6d04a1e971904e57fd0494429c01e8c866146cdd 100644 --- a/ee/app/assets/javascripts/analytics/cycle_analytics/components/base.vue +++ b/ee/app/assets/javascripts/analytics/cycle_analytics/components/base.vue @@ -42,6 +42,10 @@ export default { type: String, required: true, }, + hideGroupDropDown: { + type: Boolean, + required: true, + }, }, computed: { ...mapState([ @@ -206,6 +210,7 @@ export default { class="mt-3 py-2 px-3 d-flex bg-gray-light border-top border-bottom flex-column flex-md-row justify-content-between" > <groups-dropdown-filter + v-if="!hideGroupDropDown" class="js-groups-dropdown-filter dropdown-select" :query-params="$options.groupsQueryParams" :default-group="selectedGroup" diff --git a/ee/app/assets/javascripts/analytics/cycle_analytics/index.js 
b/ee/app/assets/javascripts/analytics/cycle_analytics/index.js index b5bc9f398577ac42c95260beb9adde61980f587d..6f7e17ffab0646a072253fc01c35a2e71a28e9b8 100644 --- a/ee/app/assets/javascripts/analytics/cycle_analytics/index.js +++ b/ee/app/assets/javascripts/analytics/cycle_analytics/index.js @@ -2,10 +2,11 @@ import Vue from 'vue'; import CycleAnalytics from './components/base.vue'; import createStore from './store'; import { buildCycleAnalyticsInitialData } from '../shared/utils'; +import { parseBoolean } from '~/lib/utils/common_utils'; export default () => { const el = document.querySelector('#js-cycle-analytics-app'); - const { emptyStateSvgPath, noDataSvgPath, noAccessSvgPath } = el.dataset; + const { emptyStateSvgPath, noDataSvgPath, noAccessSvgPath, hideGroupDropDown } = el.dataset; const initialData = buildCycleAnalyticsInitialData(el.dataset); const store = createStore(); @@ -21,6 +22,7 @@ export default () => { emptyStateSvgPath, noDataSvgPath, noAccessSvgPath, + hideGroupDropDown: parseBoolean(hideGroupDropDown), }, }), }); diff --git a/ee/app/assets/javascripts/geo_node_form/components/geo_node_form_capacities.vue b/ee/app/assets/javascripts/geo_node_form/components/geo_node_form_capacities.vue index 9423d0da68a9446db1f818de074642b572e0507f..4e10f75eed006bcb5912731e41e67c4a38dbe929 100644 --- a/ee/app/assets/javascripts/geo_node_form/components/geo_node_form_capacities.vue +++ b/ee/app/assets/javascripts/geo_node_form/components/geo_node_form_capacities.vue @@ -35,14 +35,6 @@ export default { key: 'filesMaxCapacity', conditional: 'secondary', }, - { - id: 'node-verification-capacity-field', - label: __('Verification capacity'), - description: __( - 'Control the maximum concurrency of verification operations for this Geo node', - ), - key: 'verificationMaxCapacity', - }, { id: 'node-container-repository-capacity-field', label: __('Container repositories sync capacity'), @@ -50,6 +42,15 @@ export default { 'Control the maximum concurrency of container 
repository operations for this Geo node', ), key: 'containerRepositoriesMaxCapacity', + conditional: 'secondary', + }, + { + id: 'node-verification-capacity-field', + label: __('Verification capacity'), + description: __( + 'Control the maximum concurrency of verification operations for this Geo node', + ), + key: 'verificationMaxCapacity', }, { id: 'node-reverification-interval-field', diff --git a/ee/app/assets/javascripts/pages/groups/analytics/cycle_analytics/index.js b/ee/app/assets/javascripts/pages/groups/analytics/cycle_analytics/index.js new file mode 100644 index 0000000000000000000000000000000000000000..c3cae64f6c33d6d51e2171a56f896c413752f074 --- /dev/null +++ b/ee/app/assets/javascripts/pages/groups/analytics/cycle_analytics/index.js @@ -0,0 +1,3 @@ +import initCycleAnalyticsApp from 'ee/analytics/cycle_analytics/index'; + +document.addEventListener('DOMContentLoaded', initCycleAnalyticsApp); diff --git a/ee/app/controllers/analytics/analytics_controller.rb b/ee/app/controllers/analytics/analytics_controller.rb index f87e3d817976dc4208ec39d327acbf38c235d55f..885bda8c4c26b0480f2c3cede06b926859b731f4 100644 --- a/ee/app/controllers/analytics/analytics_controller.rb +++ b/ee/app/controllers/analytics/analytics_controller.rb @@ -4,8 +4,10 @@ class Analytics::AnalyticsController < Analytics::ApplicationController def index if Feature.disabled?(:group_level_productivity_analytics, default_enabled: true) && Gitlab::Analytics.productivity_analytics_enabled? redirect_to analytics_productivity_analytics_path - elsif Gitlab::Analytics.cycle_analytics_enabled? + elsif Feature.disabled?(:group_level_cycle_analytics) && Gitlab::Analytics.cycle_analytics_enabled? 
redirect_to analytics_cycle_analytics_path + elsif can?(current_user, :read_instance_statistics) + redirect_to instance_statistics_dev_ops_score_index_path else render_404 end diff --git a/ee/app/controllers/groups/analytics/cycle_analytics_controller.rb b/ee/app/controllers/groups/analytics/cycle_analytics_controller.rb new file mode 100644 index 0000000000000000000000000000000000000000..df6f48e09ab65303ec932a435136e950c6d899d8 --- /dev/null +++ b/ee/app/controllers/groups/analytics/cycle_analytics_controller.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +class Groups::Analytics::CycleAnalyticsController < Groups::Analytics::ApplicationController + include CycleAnalyticsParams + + layout 'group' + + check_feature_flag Gitlab::Analytics::CYCLE_ANALYTICS_FEATURE_FLAG + increment_usage_counter Gitlab::UsageDataCounters::CycleAnalyticsCounter, :views, only: :show + + before_action do + push_frontend_feature_flag(:customizable_cycle_analytics) + push_frontend_feature_flag(:cycle_analytics_scatterplot_enabled, default_enabled: true) + push_frontend_feature_flag(:cycle_analytics_scatterplot_median_enabled, default_enabled: true) + push_frontend_feature_flag(:tasks_by_type_chart) + end + + before_action :load_group, only: :show + before_action :load_project, only: :show + before_action :build_request_params, only: :show + + def build_request_params + @request_params ||= Gitlab::Analytics::CycleAnalytics::RequestParams.new(allowed_params.merge(group: @group), current_user: current_user) + end + + def allowed_params + params.permit( + :created_after, + :created_before, + project_ids: [] + ) + end +end diff --git a/ee/app/controllers/groups/contribution_analytics_controller.rb b/ee/app/controllers/groups/contribution_analytics_controller.rb index 3f7e1f638b6fa70329a89cf6ff20a1c400e410b6..b3d8f9e043e732e4ffd5e35f99674a2c0c217082 100644 --- a/ee/app/controllers/groups/contribution_analytics_controller.rb +++ 
b/ee/app/controllers/groups/contribution_analytics_controller.rb @@ -3,6 +3,7 @@ class Groups::ContributionAnalyticsController < Groups::ApplicationController before_action :group before_action :check_contribution_analytics_available! + before_action :authorize_read_contribution_analytics! layout 'group' @@ -27,6 +28,28 @@ def data_collector end def check_contribution_analytics_available! - render_404 unless @group.feature_available?(:contribution_analytics) || LicenseHelper.show_promotions?(current_user) + return if group_has_access_to_feature? + + show_promotions? ? render_promotion : render_404 + end + + def authorize_read_contribution_analytics! + render_403 unless user_has_access_to_feature? + end + + def render_promotion + render 'shared/promotions/_promote_contribution_analytics' + end + + def show_promotions? + LicenseHelper.show_promotions?(current_user) + end + + def group_has_access_to_feature? + @group.feature_available?(:contribution_analytics) + end + + def user_has_access_to_feature? 
+ can?(current_user, :read_group_contribution_analytics, @group) end end diff --git a/ee/app/helpers/ee/analytics_navbar_helper.rb b/ee/app/helpers/ee/analytics_navbar_helper.rb index e37385ed0e18fd881779e24f32973b68fe999f51..2b461582bfd926ac22db1dd41994ed53c62ef517 100644 --- a/ee/app/helpers/ee/analytics_navbar_helper.rb +++ b/ee/app/helpers/ee/analytics_navbar_helper.rb @@ -19,7 +19,8 @@ def group_analytics_navbar_links(group, current_user) contribution_analytics_navbar_link(group, current_user), group_insights_navbar_link(group, current_user), issues_analytics_navbar_link(group, current_user), - productivity_analytics_navbar_link(group, current_user) + productivity_analytics_navbar_link(group, current_user), + group_cycle_analytics_navbar_link(group, current_user) ].compact end @@ -36,6 +37,18 @@ def project_issues_analytics_navbar_link(project, current_user) ) end + def group_cycle_analytics_navbar_link(group, current_user) + return unless ::Feature.enabled?(:analytics_pages_under_group_analytics_sidebar, group, default_enabled: true) + return unless ::Feature.enabled?(:group_level_cycle_analytics) + return unless group_sidebar_link?(:cycle_analytics) + + navbar_sub_item( + title: _('Value Stream Analytics'), + path: 'groups/analytics/cycle_analytics#show', + link: group_analytics_cycle_analytics_path(group) + ) + end + def productivity_analytics_navbar_link(group, current_user) return unless ::Feature.enabled?(:analytics_pages_under_group_analytics_sidebar, group, default_enabled: true) return unless ::Feature.enabled?(:group_level_productivity_analytics, default_enabled: true) @@ -44,7 +57,7 @@ def productivity_analytics_navbar_link(group, current_user) navbar_sub_item( title: _('Productivity Analytics'), path: 'groups/analytics/productivity_analytics#show', - link: group_analytics_productivity_analytics_path(@group) + link: group_analytics_productivity_analytics_path(group) ) end diff --git a/ee/app/helpers/ee/dashboard_helper.rb 
b/ee/app/helpers/ee/dashboard_helper.rb index d865190014172a4b7b9234c64280fc75ec2ebf02..34ed96707012bcaac91fa0430f57d4d4876c4be4 100644 --- a/ee/app/helpers/ee/dashboard_helper.rb +++ b/ee/app/helpers/ee/dashboard_helper.rb @@ -32,7 +32,7 @@ def has_start_trial? end def analytics_nav_url - if ::Gitlab::Analytics.any_features_enabled? + if ::Feature.disabled?(:group_level_cycle_analytics) && ::Gitlab::Analytics.any_features_enabled? return analytics_root_path end @@ -48,7 +48,7 @@ def analytics_nav_url override :get_dashboard_nav_links def get_dashboard_nav_links super.tap do |links| - links << :analytics if ::Gitlab::Analytics.any_features_enabled? + links << :analytics if ::Feature.disabled?(:group_level_cycle_analytics) && ::Gitlab::Analytics.any_features_enabled? if can?(current_user, :read_operations_dashboard) links << :environments if ::Feature.enabled?(:environments_dashboard, current_user, default_enabled: true) diff --git a/ee/app/helpers/ee/groups_helper.rb b/ee/app/helpers/ee/groups_helper.rb index 061cb1494aae1a7ed8ac32d26f5497ecc7152e67..b8c8bd9de0343953aae728500f4c5f63b06a2c28 100644 --- a/ee/app/helpers/ee/groups_helper.rb +++ b/ee/app/helpers/ee/groups_helper.rb @@ -98,6 +98,10 @@ def show_discover_group_security?(group) def get_group_sidebar_links links = super + if can?(current_user, :read_group_cycle_analytics, @group) + links << :cycle_analytics + end + if can?(current_user, :read_group_contribution_analytics, @group) || show_promotions? 
links << :contribution_analytics end diff --git a/ee/app/models/concerns/elastic/application_versioned_search.rb b/ee/app/models/concerns/elastic/application_versioned_search.rb index 06afe217c4ec55ca61d52273513bce65037241ec..a0c1e5e663d6ad8414bd63ed9a6079227c894daf 100644 --- a/ee/app/models/concerns/elastic/application_versioned_search.rb +++ b/ee/app/models/concerns/elastic/application_versioned_search.rb @@ -45,10 +45,14 @@ class << self end def maintain_elasticsearch_create + return if maintain_elasticsearch_incremental_bulk + ElasticIndexerWorker.perform_async(:index, self.class.to_s, self.id, self.es_id) end def maintain_elasticsearch_update + return if maintain_elasticsearch_incremental_bulk + ElasticIndexerWorker.perform_async( :update, self.class.to_s, @@ -58,11 +62,21 @@ def maintain_elasticsearch_update end def maintain_elasticsearch_destroy + return if maintain_elasticsearch_incremental_bulk + ElasticIndexerWorker.perform_async( :delete, self.class.to_s, self.id, self.es_id, es_parent: self.es_parent ) end + def maintain_elasticsearch_incremental_bulk + return false unless Feature.enabled?(:elastic_bulk_incremental_updates, self.project) + + ::Elastic::ProcessBookkeepingService.track!(self) + + true + end + class_methods do def __elasticsearch__ @__elasticsearch__ ||= ::Elastic::MultiVersionClassProxy.new(self) diff --git a/ee/app/models/concerns/elastic/projects_search.rb b/ee/app/models/concerns/elastic/projects_search.rb index 4a8debb39ea1ca81daa2486e28ed9bbb3639459a..4dcedac4ff1b8918d217c25a19e01ca7394bf8c5 100644 --- a/ee/app/models/concerns/elastic/projects_search.rb +++ b/ee/app/models/concerns/elastic/projects_search.rb @@ -19,6 +19,14 @@ def use_elasticsearch? ::Gitlab::CurrentSettings.elasticsearch_indexes_project?(self) end + def maintain_elasticsearch_incremental_bulk + # TODO: ElasticIndexerWorker does extra work for project hooks, so we + # can't use the incremental-bulk indexer for projects yet. 
+ # + # https://gitlab.com/gitlab-org/gitlab/issues/207494 + false + end + def each_indexed_association INDEXED_ASSOCIATIONS.each do |association_name| association = self.association(association_name) diff --git a/ee/app/models/ee/user_bot_type_enums.rb b/ee/app/models/ee/user_bot_type_enums.rb index f86987ff43ded45c587cff494093b78672305d0f..a9f3ecf9e71aee0899ca5ca259fdf23531c3c213 100644 --- a/ee/app/models/ee/user_bot_type_enums.rb +++ b/ee/app/models/ee/user_bot_type_enums.rb @@ -9,7 +9,7 @@ module UserBotTypeEnums override :bots def bots - # When adding a new key, please ensure you are not redefining a key that already exists in app/models/user_bot_types_enums.rb + # When adding a new key, please ensure you are not redefining a key that already exists in app/models/user_bot_type_enums.rb bots_hash = super.merge(support_bot: 1, visual_review_bot: 3) bots_hash[:custom] = 99 if ::Gitlab.com? bots_hash diff --git a/ee/app/policies/ee/group_policy.rb b/ee/app/policies/ee/group_policy.rb index 8f9b697f21b452f9e0ec560cc59ecaf737529d75..f19de3b79413cc3134b1f5523c23c788c71b4bed 100644 --- a/ee/app/policies/ee/group_policy.rb +++ b/ee/app/policies/ee/group_policy.rb @@ -75,7 +75,7 @@ module GroupPolicy rule { can?(:read_cluster) & cluster_deployments_available } .enable :read_cluster_environments - rule { can?(:read_group) & contribution_analytics_available } + rule { has_access & contribution_analytics_available } .enable :read_group_contribution_analytics rule { reporter & cycle_analytics_available }.policy do diff --git a/ee/app/services/elastic/process_bookkeeping_service.rb b/ee/app/services/elastic/process_bookkeeping_service.rb new file mode 100644 index 0000000000000000000000000000000000000000..5c0f0d54c9f10e224de194d9a5e347ed8fb15b80 --- /dev/null +++ b/ee/app/services/elastic/process_bookkeeping_service.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +module Elastic + class ProcessBookkeepingService + REDIS_SET_KEY = 'elastic:incremental:updates:0:zset' + 
REDIS_SCORE_KEY = 'elastic:incremental:updates:0:score' + LIMIT = 1000 + + class << self + # Add some records to the processing queue. Items must be serializable to + # a Gitlab::Elastic::DocumentReference + def track!(*items) + return true if items.empty? + + items.map! { |item| ::Gitlab::Elastic::DocumentReference.serialize(item) } + + with_redis do |redis| + # Efficiently generate a guaranteed-unique score for each item + max = redis.incrby(REDIS_SCORE_KEY, items.size) + min = (max - items.size) + 1 + + (min..max).zip(items).each_slice(1000) do |group| + logger.debug(message: 'track_items', count: group.count, items: group) + + redis.zadd(REDIS_SET_KEY, group) + end + end + + true + end + + def queue_size + with_redis { |redis| redis.zcard(REDIS_SET_KEY) } + end + + def clear_tracking! + with_redis { |redis| redis.del(REDIS_SET_KEY, REDIS_SCORE_KEY) } + end + + def logger + # build already caches the logger via request store + ::Gitlab::Elasticsearch::Logger.build + end + + def with_redis(&blk) + Gitlab::Redis::SharedState.with(&blk) # rubocop:disable CodeReuse/ActiveRecord + end + end + + def execute + self.class.with_redis { |redis| execute_with_redis(redis) } + end + + private + + def execute_with_redis(redis) + specs = redis.zrangebyscore(REDIS_SET_KEY, '-inf', '+inf', limit: [0, LIMIT], with_scores: true) + return if specs.empty? + + first_score = specs.first.last + last_score = specs.last.last + + logger.info( + message: 'bulk_indexing_start', + records_count: specs.count, + first_score: first_score, + last_score: last_score + ) + + specs.each { |spec, _| submit_document(spec) } + failures = bulk_indexer.flush + + # Re-enqueue any failures so they are retried + self.class.track!(*failures) if failures.present? 
+ + # Remove all the successes + redis.zremrangebyscore(REDIS_SET_KEY, first_score, last_score) + + logger.info( + message: 'bulk_indexing_end', + records_count: specs.count, + failures_count: failures.count, + first_score: first_score, + last_score: last_score + ) + end + + def submit_document(spec) + ref = ::Gitlab::Elastic::DocumentReference.deserialize(spec) + + bulk_indexer.process(ref) + rescue ::Gitlab::Elastic::DocumentReference::InvalidError => err + logger.warn( + message: 'submit_document_failed', + reference: spec, + error_class: err.class.to_s, + error_message: err.message + ) + end + + def bulk_indexer + @bulk_indexer ||= ::Gitlab::Elastic::BulkIndexer.new(logger: logger) + end + + def logger + self.class.logger + end + end +end diff --git a/ee/app/views/admin/geo/nodes/_form.html.haml b/ee/app/views/admin/geo/nodes/_form.html.haml index f19507a4151d1c07257ff6daeab934b5b30ef2d1..7e7cfc29ddb3ca44501b8c2aae661d36703e3e2d 100644 --- a/ee/app/views/admin/geo/nodes/_form.html.haml +++ b/ee/app/views/admin/geo/nodes/_form.html.haml @@ -52,18 +52,18 @@ = form.number_field :files_max_capacity, class: 'form-control col-sm-2', min: 0 .form-text.text-muted= s_('Geo|Control the maximum concurrency of LFS/attachment backfill for this secondary node') +.form-row.form-group.js-hide-if-geo-primary{ class: ('hidden' unless geo_node.secondary?) 
} + .col-sm-8 + = form.label :container_repositories_max_capacity, s_('Geo|Container repositories sync capacity'), class: 'font-weight-bold' + = form.number_field :container_repositories_max_capacity, class: 'form-control col-sm-2', min: 0 + .form-text.text-muted= s_('Geo|Control the maximum concurrency of container repository operations for this Geo node') + .form-row.form-group .col-sm-8 = form.label :verification_max_capacity, s_('Geo|Verification capacity'), class: 'font-weight-bold' = form.number_field :verification_max_capacity, class: 'form-control col-sm-2', min: 0 .form-text.text-muted= s_('Geo|Control the maximum concurrency of verification operations for this Geo node') -.form-row.form-group - .col-sm-8 - = form.label :container_repositories_max_capacity, s_('Geo|Container repositories sync capacity'), class: 'font-weight-bold' - = form.number_field :container_repositories_max_capacity, class: 'form-control col-sm-2', min: 0 - .form-text.text-muted= s_('Geo|Control the maximum concurrency of container repository operations for this Geo node') - .form-row.form-group.js-hide-if-geo-secondary{ class: ('hidden' unless geo_node.primary?) } .col-sm-8 = form.label :minimum_reverification_interval, s_('Geo|Re-verification interval'), class: 'font-weight-bold' diff --git a/ee/app/views/groups/analytics/cycle_analytics/show.html.haml b/ee/app/views/groups/analytics/cycle_analytics/show.html.haml new file mode 100644 index 0000000000000000000000000000000000000000..ca24d4437d0a108636c68beaf58297894dfd28db --- /dev/null +++ b/ee/app/views/groups/analytics/cycle_analytics/show.html.haml @@ -0,0 +1,6 @@ +- page_title _("Value Stream Analytics") +- data_attributes = @request_params.valid? ? 
@request_params.to_data_attributes : {} + +- data_attributes.merge!({ empty_state_svg_path: image_path("illustrations/analytics/cycle-analytics-empty-chart.svg"), no_data_svg_path: image_path("illustrations/analytics/cycle-analytics-empty-chart.svg"), no_access_svg_path: image_path("illustrations/analytics/no-access.svg"), hide_group_drop_down: 'true' }) + +#js-cycle-analytics-app{ data: data_attributes } diff --git a/ee/app/views/groups/dependency_proxies/show.html.haml b/ee/app/views/groups/dependency_proxies/show.html.haml index d38d9bf7d7b79b046d11818bd329e7b21063114e..22f70b789dbdb656be16961bdc548565cf346052 100644 --- a/ee/app/views/groups/dependency_proxies/show.html.haml +++ b/ee/app/views/groups/dependency_proxies/show.html.haml @@ -4,7 +4,7 @@ %h4= _('Dependency proxy') %p - - link_start = '<a href="%{url}">'.html_safe % { url: help_page_path('PLACEHOLDER') } + - link_start = '<a href="%{url}">'.html_safe % { url: help_page_path('user/packages/dependency_proxy/index') } = _('Create a local proxy for storing frequently used upstream images. %{link_start}Learn more%{link_end} about dependency proxies.').html_safe % { link_start: link_start, link_end: '</a>'.html_safe } - if @group.public? diff --git a/ee/app/views/layouts/nav/sidebar/_analytics.html.haml b/ee/app/views/layouts/nav/sidebar/_analytics.html.haml index 9d797bebf12e3e3e0528a3beb91e9d12fdea32c9..f9c201a7e194cf8abab3a3303763b7110f70b9e0 100644 --- a/ee/app/views/layouts/nav/sidebar/_analytics.html.haml +++ b/ee/app/views/layouts/nav/sidebar/_analytics.html.haml @@ -19,7 +19,7 @@ %strong.fly-out-top-item-name = _('Productivity Analytics') - - if Gitlab::Analytics.cycle_analytics_enabled? + - if Feature.disabled?(:group_level_cycle_analytics) && Gitlab::Analytics.cycle_analytics_enabled? 
= nav_link(controller: :cycle_analytics) do = link_to analytics_cycle_analytics_path, class: 'qa-sidebar-cycle-analytics' do .nav-icon-container diff --git a/ee/app/workers/all_queues.yml b/ee/app/workers/all_queues.yml index 7cfb7233d09729aee441ea82ddb04c1d49b0b310..9ef7d4d1f92b71162ae0910a4272ac8dfab24f6f 100644 --- a/ee/app/workers/all_queues.yml +++ b/ee/app/workers/all_queues.yml @@ -24,6 +24,13 @@ :resource_boundary: :unknown :weight: 1 :idempotent: +- :name: cronjob:elastic_index_bulk_cron + :feature_category: :search + :has_external_dependencies: + :latency_sensitive: + :resource_boundary: :unknown + :weight: 1 + :idempotent: true - :name: cronjob:geo_container_repository_sync_dispatch :feature_category: :geo_replication :has_external_dependencies: diff --git a/ee/app/workers/elastic_index_bulk_cron_worker.rb b/ee/app/workers/elastic_index_bulk_cron_worker.rb new file mode 100644 index 0000000000000000000000000000000000000000..1a95218e738f2e056e4f3847a9eca3a0ddf25a47 --- /dev/null +++ b/ee/app/workers/elastic_index_bulk_cron_worker.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +class ElasticIndexBulkCronWorker + include ApplicationWorker + include Gitlab::ExclusiveLeaseHelpers + + # There is no onward scheduling and this cron handles work from across the + # application, so there's no useful context to add. + include CronjobQueue # rubocop:disable Scalability/CronWorkerContext + + feature_category :search + idempotent! 
+ + def perform + in_lock(self.class.name.underscore, ttl: 10.minutes, retries: 10, sleep_sec: 1) do + Elastic::ProcessBookkeepingService.new.execute + end + rescue Gitlab::ExclusiveLeaseHelpers::FailedToObtainLockError + # We're scheduled on a cronjob, so nothing to do here + end +end diff --git a/ee/changelogs/unreleased/34086-es-bulk-incremental-index-updates.yml b/ee/changelogs/unreleased/34086-es-bulk-incremental-index-updates.yml new file mode 100644 index 0000000000000000000000000000000000000000..455bd01632a9ca5ea5e1a9e1054a4295dee32541 --- /dev/null +++ b/ee/changelogs/unreleased/34086-es-bulk-incremental-index-updates.yml @@ -0,0 +1,5 @@ +--- +title: Add a bulk processor for ES incremental updates +merge_request: +author: +type: added diff --git a/ee/changelogs/unreleased/security-ag-contribution-analytics.yml b/ee/changelogs/unreleased/security-ag-contribution-analytics.yml new file mode 100644 index 0000000000000000000000000000000000000000..a0b0276a1a3d2cabb6a8be948af46308190bcebe --- /dev/null +++ b/ee/changelogs/unreleased/security-ag-contribution-analytics.yml @@ -0,0 +1,5 @@ +--- +title: Don't show Contribution Analytics to users who are not group members +merge_request: +author: +type: security diff --git a/ee/config/routes/group.rb b/ee/config/routes/group.rb index ec9d9d07b5dcd9eee7a761b46f04e521f9c6d20c..b9f14b839874a6cf82a34cf7cac0c28e7c8d79c3 100644 --- a/ee/config/routes/group.rb +++ b/ee/config/routes/group.rb @@ -33,6 +33,7 @@ end namespace :analytics do resource :productivity_analytics, only: :show, constraints: -> (req) { Feature.enabled?(:group_level_productivity_analytics, default_enabled: true) && Gitlab::Analytics.productivity_analytics_enabled? } + resource :cycle_analytics, path: 'value_stream_analytics', only: :show, constraints: -> (req) { Feature.enabled?(:group_level_cycle_analytics) && Gitlab::Analytics.cycle_analytics_enabled? 
} end resource :ldap, only: [] do diff --git a/ee/db/fixtures/development/20_vulnerabilities.rb b/ee/db/fixtures/development/20_vulnerabilities.rb index 675b194e576a814339aed78dcd679819c5c6d8c0..f4fdde6b3bf71a5dc729efdba92a40144765e379 100644 --- a/ee/db/fixtures/development/20_vulnerabilities.rb +++ b/ee/db/fixtures/development/20_vulnerabilities.rb @@ -45,7 +45,7 @@ def create_vulnerability when :resolved vulnerability.resolved_by = author when :dismissed - vulnerability.closed_by = author + vulnerability.dismissed_by = author end vulnerability.tap(&:save!) diff --git a/ee/lib/gitlab/elastic/bulk_indexer.rb b/ee/lib/gitlab/elastic/bulk_indexer.rb new file mode 100644 index 0000000000000000000000000000000000000000..2e3012f7204b780a801a31f5735f20eae880a8ce --- /dev/null +++ b/ee/lib/gitlab/elastic/bulk_indexer.rb @@ -0,0 +1,148 @@ +# frozen_string_literal: true + +module Gitlab + module Elastic + # Accumulate records and submit to elasticsearch in bulk, respecting limits + # on request size. + # + # Call +process+ to accumulate records in memory, submitting bulk requests + # when the bulk limits are reached. + # + # Once finished, call +flush+. Any errors accumulated earlier will be + # reported by this call. + # + # BulkIndexer is not safe for concurrent use. + class BulkIndexer + include ::Elasticsearch::Model::Client::ClassMethods + + attr_reader :logger, :failures + + def initialize(logger:) + @body = [] + @body_size_bytes = 0 + @failures = [] + @logger = logger + @ref_cache = [] + end + + # Adds or removes a document in elasticsearch, depending on whether the + # database record it refers to can be found + def process(ref) + ref_cache << ref + + if ref.database_record + index(ref) + else + delete(ref) + end + end + + def flush + maybe_send_bulk(force: true).failures + end + + private + + def reset! 
+ @body = [] + @body_size_bytes = 0 + @ref_cache = [] + end + + attr_reader :body, :body_size_bytes, :ref_cache + + def index(ref) + proxy = ref.database_record.__elasticsearch__ + op = build_op(ref, proxy) + + submit({ index: op }, proxy.as_indexed_json) + + maybe_send_bulk + end + + def delete(ref) + proxy = ref.klass.__elasticsearch__ + op = build_op(ref, proxy) + + submit(delete: op) + + maybe_send_bulk + end + + def build_op(ref, proxy) + op = { + _index: proxy.index_name, + _type: proxy.document_type, + _id: ref.es_id + } + + op[:_routing] = ref.es_parent if ref.es_parent # blank for projects + + op + end + + def bulk_limit_bytes + Gitlab::CurrentSettings.elasticsearch_max_bulk_size_mb.megabytes + end + + def submit(*hashes) + hashes.each do |hash| + text = hash.to_json + + body.push(text) + @body_size_bytes += text.bytesize + 2 # Account for newlines + end + end + + def maybe_send_bulk(force: false) + return self if body.empty? + return self if body_size_bytes < bulk_limit_bytes && !force + + failed_refs = try_send_bulk + + logger.info( + message: 'bulk_submitted', + body_size_bytes: body_size_bytes, + bulk_count: ref_cache.count, + errors_count: failed_refs.count + ) + + failures.push(*failed_refs) + + reset! + + self + end + + def try_send_bulk + process_errors(client.bulk(body: body)) + rescue => err + # If an exception is raised, treat the entire bulk as failed + logger.error(message: 'bulk_exception', error_class: err.class.to_s, error_message: err.message) + + ref_cache + end + + def process_errors(result) + return [] unless result['errors'] + + out = [] + + # Items in the response have the same order as items in the request. + # + # Example succces: {"index": {"result": "created", "status": 201}} + # Example failure: {"index": {"error": {...}, "status": 400}} + result['items'].each_with_index do |item, i| + op = item['index'] || item['delete'] + + if op.nil? 
|| op['error'] + logger.warn(message: 'bulk_error', item: item) + out << ref_cache[i] + end + end + + out + end + end + end +end diff --git a/ee/lib/gitlab/elastic/document_reference.rb b/ee/lib/gitlab/elastic/document_reference.rb new file mode 100644 index 0000000000000000000000000000000000000000..7a586f296638868705b95a2fdea617118df88fbb --- /dev/null +++ b/ee/lib/gitlab/elastic/document_reference.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +module Gitlab + module Elastic + # Tracks some essential information needed to tie database and elasticsearch + # records together, and to delete ES documents when the database object no + # longer exists. + # + # A custom serialisation format suitable for Redis is included. + class DocumentReference + include Gitlab::Utils::StrongMemoize + + InvalidError = Class.new(StandardError) + + class << self + def build(instance) + new(instance.class, instance.id, instance.es_id, instance.es_parent) + end + + def serialize(anything) + case anything + when String + anything + when Gitlab::Elastic::DocumentReference + anything.serialize + when ApplicationRecord + serialize_record(anything) + when Array + serialize_array(anything) + else + raise InvalidError.new("Don't know how to serialize #{anything.class}") + end + end + + def serialize_record(record) + serialize_array([record.class.to_s, record.id, record.es_id, record.es_parent].compact) + end + + def serialize_array(array) + test_array!(array) + + array.join(' ') + end + + def deserialize(string) + deserialize_array(string.split(' ')) + end + + def deserialize_array(array) + test_array!(array) + + new(*array) + end + + private + + def test_array!(array) + raise InvalidError.new("Bad array representation: #{array.inspect}") unless + (3..4).cover?(array.size) + end + end + + attr_reader :klass, :db_id, :es_id + + # This attribute is nil for some records, e.g., projects + attr_reader :es_parent + + def initialize(klass_or_name, db_id, es_id, es_parent = nil) + @klass = 
klass_or_name + @klass = klass_or_name.constantize if @klass.is_a?(String) + @db_id = db_id + @es_id = es_id + @es_parent = es_parent + end + + def ==(other) + other.instance_of?(self.class) && + self.serialize == other.serialize + end + + def klass_name + klass.to_s + end + + # TODO: return a promise for batch loading: https://gitlab.com/gitlab-org/gitlab/issues/207280 + def database_record + strong_memoize(:database_record) { klass.find_by_id(db_id) } + end + + def serialize + self.class.serialize_array([klass_name, db_id, es_id, es_parent].compact) + end + end + end +end diff --git a/ee/spec/controllers/analytics/analytics_controller_spec.rb b/ee/spec/controllers/analytics/analytics_controller_spec.rb index 2dd509a1cde8c09773a1757819ad9d9bc93b1071..e5c793bb862fa49b6f35f1443d7b9322be8f0559 100644 --- a/ee/spec/controllers/analytics/analytics_controller_spec.rb +++ b/ee/spec/controllers/analytics/analytics_controller_spec.rb @@ -9,6 +9,7 @@ before do stub_feature_flags(group_level_productivity_analytics: false) + stub_feature_flags(group_level_cycle_analytics: false) sign_in(user) disable_all_analytics_feature_flags @@ -33,10 +34,22 @@ end end - it 'renders 404 all the analytics feature flags are disabled' do + it 'renders devops score page when all the analytics feature flags are disabled' do get :index - expect(response).to have_gitlab_http_status(:not_found) + expect(response).to redirect_to(instance_statistics_dev_ops_score_index_path) + end + + context 'when instance statistics is private' do + before do + stub_application_setting(instance_statistics_visibility_private: true) + end + + it 'renders 404, not found' do + get :index + + expect(response).to have_gitlab_http_status(:not_found) + end end end end diff --git a/ee/spec/controllers/groups/analytics/cycle_analytics_controller_spec.rb b/ee/spec/controllers/groups/analytics/cycle_analytics_controller_spec.rb new file mode 100644 index 
0000000000000000000000000000000000000000..728515533ed1da277207d10d392a3f3a25618faa --- /dev/null +++ b/ee/spec/controllers/groups/analytics/cycle_analytics_controller_spec.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Analytics::CycleAnalyticsController do + let(:user) { create(:user) } + + before do + sign_in(user) + end + + describe 'usage counter' do + it 'increments usage counter' do + expect(Gitlab::UsageDataCounters::CycleAnalyticsCounter).to receive(:count).with(:views) + + get(:show) + + expect(response).to be_successful + end + end + + describe 'GET show' do + it 'renders `show` template' do + stub_feature_flags(Gitlab::Analytics::CYCLE_ANALYTICS_FEATURE_FLAG => true) + + get :show + + expect(response).to render_template :show + end + + it 'renders `404` when feature flag is disabled' do + stub_feature_flags(Gitlab::Analytics::CYCLE_ANALYTICS_FEATURE_FLAG => false) + + get :show + + expect(response).to have_gitlab_http_status(:not_found) + end + end +end diff --git a/ee/spec/controllers/groups/contribution_analytics_controller_spec.rb b/ee/spec/controllers/groups/contribution_analytics_controller_spec.rb index a4b02fd2ba79b7b678dde47050d6a1a53e8f8143..24e25802aa705e9df08d189300c9096709a09e09 100644 --- a/ee/spec/controllers/groups/contribution_analytics_controller_spec.rb +++ b/ee/spec/controllers/groups/contribution_analytics_controller_spec.rb @@ -6,6 +6,7 @@ let(:user) { create(:user) } let(:user2) { create(:user) } let(:user3) { create(:user) } + let(:guest_user) { create(:user) } let(:group) { create(:group) } let(:project) { create(:project, :repository, group: group) } let(:issue) { create(:issue, project: project) } @@ -30,128 +31,198 @@ def create_push_event(author, project) group.add_owner(user) group.add_user(user2, GroupMember::DEVELOPER) group.add_user(user3, GroupMember::MAINTAINER) - sign_in(user) - - create_event(user, project, issue, Event::CLOSED) - create_event(user2, project, issue, 
Event::CLOSED) - create_event(user2, project, merge_request, Event::CREATED) - create_event(user3, project, merge_request, Event::CREATED) - create_push_event(user, project) - create_push_event(user3, project) end - it 'returns 404 when feature is not available and we dont show promotions' do - stub_licensed_features(contribution_analytics: false) + describe '#authorize_read_contribution_analytics!' do + before do + group.add_user(guest_user, GroupMember::GUEST) + sign_in(guest_user) + end - get :show, params: { group_id: group.path } + context 'when user has access to the group' do + let(:request) { get :show, params: { group_id: group.path } } - expect(response).to have_gitlab_http_status(:not_found) - end + context 'when feature is available to the group' do + before do + allow(License).to receive(:feature_available?).and_call_original + allow(License).to receive(:feature_available?) + .with(:contribution_analytics) + .and_return(true) - context 'unlicensed but we show promotions' do - before do - allow(License).to receive(:current).and_return(nil) - allow(LicenseHelper).to receive(:show_promotions?).and_return(true) - stub_application_setting(check_namespace_plan: false) - end + allow(Ability).to receive(:allowed?).and_call_original + allow(Ability).to receive(:allowed?) 
+ .with(guest_user, :read_group_contribution_analytics, group) + .and_return(user_has_access_to_feature) + end - it 'returns page when feature is not available and we show promotions' do - stub_licensed_features(contribution_analytics: false) + context 'when user has access to the feature' do + let(:user_has_access_to_feature) { true } - get :show, params: { group_id: group.path } + it 'renders 200' do + request - expect(response).to have_gitlab_http_status(:ok) + expect(response).to have_gitlab_http_status(:ok) + end + end + + context 'when user does not have access to the feature' do + let(:user_has_access_to_feature) { false } + + it 'renders 403' do + request + + expect(response).to have_gitlab_http_status(:forbidden) + end + end + end end - end - it 'sets instance variables properly', :aggregate_failures do - get :show, params: { group_id: group.path } + describe '#check_contribution_analytics_available!' do + before do + group.add_user(guest_user, GroupMember::GUEST) + sign_in(guest_user) + end - expect(response).to have_gitlab_http_status(:ok) + context 'when feature is not available to the group' do + let(:request) { get :show, params: { group_id: group.path } } - expect(assigns[:data_collector].users).to match_array([user, user2, user3]) - expect(assigns[:data_collector].total_events_by_author_count.values.sum).to eq(6) - stats = assigns[:data_collector].group_member_contributions_table_data + before do + allow(License).to receive(:feature_available?).and_call_original + allow(License).to receive(:feature_available?) + .with(:contribution_analytics) + .and_return(false) - # NOTE: The array ordering matters! The view references them all by index - expect(stats[:merge_requests_created][:data]).to eq([0, 1, 1]) - expect(stats[:issues_closed][:data]).to eq([1, 1, 0]) - expect(stats[:push][:data]).to eq([1, 0, 1]) - end + allow(LicenseHelper).to receive(:show_promotions?) 
+ .and_return(show_promotions) + end - it "returns member contributions JSON when format is JSON" do - get :show, params: { group_id: group.path }, format: :json - - expect(json_response.length).to eq(3) - - first_user = json_response.at(0) - expect(first_user["username"]).to eq(user.username) - expect(first_user["user_web_url"]).to eq("/#{user.username}") - expect(first_user["fullname"]).to eq(user.name) - expect(first_user["push"]).to eq(1) - expect(first_user["issues_created"]).to eq(0) - expect(first_user["issues_closed"]).to eq(1) - expect(first_user["merge_requests_created"]).to eq(0) - expect(first_user["merge_requests_merged"]).to eq(0) - expect(first_user["total_events"]).to eq(2) - end + context 'when promotions are on' do + let(:show_promotions) { true } + + it 'renders promotions page' do + request - it "includes projects in subgroups" do - subgroup = create(:group, parent: group) - subproject = create(:project, :repository, group: subgroup) + expect(response).to render_template( + 'shared/promotions/_promote_contribution_analytics') + end + end - create_event(user, subproject, issue, Event::CLOSED) - create_push_event(user, subproject) + context 'when promotions are not on' do + let(:show_promotions) { false } - get :show, params: { group_id: group.path }, format: :json + it 'renders 404' do + request - first_user = json_response.first - expect(first_user["issues_closed"]).to eq(2) - expect(first_user["push"]).to eq(2) + expect(response).to have_gitlab_http_status(:not_found) + end + end + end + end end - it "excludes projects outside of the group" do - empty_group = create(:group) - other_project = create(:project, :repository) + describe 'with contributions' do + before do + sign_in(user) - create_event(user, other_project, issue, Event::CLOSED) - create_push_event(user, other_project) + create_event(user, project, issue, Event::CLOSED) + create_event(user2, project, issue, Event::CLOSED) + create_event(user2, project, merge_request, Event::CREATED) 
+ create_event(user3, project, merge_request, Event::CREATED) + create_push_event(user, project) + create_push_event(user3, project) + end - get :show, params: { group_id: empty_group.path }, format: :json + it 'sets instance variables properly', :aggregate_failures do + get :show, params: { group_id: group.path } - expect(json_response).to be_empty - end + expect(response).to have_gitlab_http_status(:ok) + + expect(assigns[:data_collector].users).to match_array([user, user2, user3]) + expect(assigns[:data_collector].total_events_by_author_count.values.sum).to eq(6) + stats = assigns[:data_collector].group_member_contributions_table_data - it 'does not cause N+1 queries when the format is JSON' do - control_count = ActiveRecord::QueryRecorder.new do + # NOTE: The array ordering matters! The view references them all by index + expect(stats[:merge_requests_created][:data]).to eq([0, 1, 1]) + expect(stats[:issues_closed][:data]).to eq([1, 1, 0]) + expect(stats[:push][:data]).to eq([1, 0, 1]) + end + + it "returns member contributions JSON when format is JSON" do get :show, params: { group_id: group.path }, format: :json + + expect(json_response.length).to eq(3) + + first_user = json_response.at(0) + expect(first_user["username"]).to eq(user.username) + expect(first_user["user_web_url"]).to eq("/#{user.username}") + expect(first_user["fullname"]).to eq(user.name) + expect(first_user["push"]).to eq(1) + expect(first_user["issues_created"]).to eq(0) + expect(first_user["issues_closed"]).to eq(1) + expect(first_user["merge_requests_created"]).to eq(0) + expect(first_user["merge_requests_merged"]).to eq(0) + expect(first_user["total_events"]).to eq(2) end - controller.instance_variable_set(:@group, nil) - user4 = create(:user) - group.add_user(user4, GroupMember::DEVELOPER) + it "includes projects in subgroups" do + subgroup = create(:group, parent: group) + subproject = create(:project, :repository, group: subgroup) - expect { get :show, params: { group_id: group.path }, 
format: :json } - .not_to exceed_query_limit(control_count) - end + create_event(user, subproject, issue, Event::CLOSED) + create_push_event(user, subproject) - describe 'with views' do - render_views + get :show, params: { group_id: group.path }, format: :json - it 'avoids a N+1 query in #show' do - # Warm the cache - get :show, params: { group_id: group.path } + first_user = json_response.first + expect(first_user["issues_closed"]).to eq(2) + expect(first_user["push"]).to eq(2) + end - control_queries = ActiveRecord::QueryRecorder.new { get :show, params: { group_id: group.path } } - create_push_event(user, project) + it "excludes projects outside of the group" do + empty_group = create(:group) + other_project = create(:project, :repository) + + empty_group.add_reporter(user) + + create_event(user, other_project, issue, Event::CLOSED) + create_push_event(user, other_project) + + get :show, params: { group_id: empty_group.path }, format: :json - expect { get :show, params: { group_id: group.path } }.not_to exceed_query_limit(control_queries) + expect(json_response).to be_empty end - end - describe 'GET #index' do - subject { get :show, params: { group_id: group.to_param } } + it 'does not cause N+1 queries when the format is JSON' do + control_count = ActiveRecord::QueryRecorder.new do + get :show, params: { group_id: group.path }, format: :json + end + + controller.instance_variable_set(:@group, nil) + user4 = create(:user) + group.add_user(user4, GroupMember::DEVELOPER) + + expect { get :show, params: { group_id: group.path }, format: :json } + .not_to exceed_query_limit(control_count) + end + + describe 'with views' do + render_views + + it 'avoids a N+1 query in #show' do + # Warm the cache + get :show, params: { group_id: group.path } - it_behaves_like 'disabled when using an external authorization service' + control_queries = ActiveRecord::QueryRecorder.new { get :show, params: { group_id: group.path } } + create_push_event(user, project) + + expect { get 
:show, params: { group_id: group.path } }.not_to exceed_query_limit(control_queries) + end + end + + describe 'GET #index' do + subject { get :show, params: { group_id: group.to_param } } + + it_behaves_like 'disabled when using an external authorization service' + end end end diff --git a/ee/spec/features/analytics/analytics_workspace_spec.rb b/ee/spec/features/analytics/analytics_workspace_spec.rb index c910ec1b7e8cee2509f2d699317a8fb1405e57a6..d80615b0da0a8f66f23c71b9226750f6469f5a9f 100644 --- a/ee/spec/features/analytics/analytics_workspace_spec.rb +++ b/ee/spec/features/analytics/analytics_workspace_spec.rb @@ -10,14 +10,6 @@ sign_in(user) end - it 'renders 404 if analytics features are turned off' do - disable_all_analytics_feature_flags - - visit analytics_root_path - - expect(page.status_code).to eq(404) - end - it 'renders the productivity analytics landing page' do stub_licensed_features(Gitlab::Analytics::PRODUCTIVITY_ANALYTICS_FEATURE_FLAG => true) diff --git a/ee/spec/features/search/elastic/project_search_spec.rb b/ee/spec/features/search/elastic/project_search_spec.rb index 332ef010b258815b577d9cfcc8c77c53f6b6da70..c7371b93541a0f5518b60203f079d6aa71f36e2c 100644 --- a/ee/spec/features/search/elastic/project_search_spec.rb +++ b/ee/spec/features/search/elastic/project_search_spec.rb @@ -16,8 +16,9 @@ end describe 'searching' do - it 'finds issues', :sidekiq_might_not_need_inline do + it 'finds issues', :sidekiq_inline do create(:issue, project: project, title: 'Test searching for an issue') + ensure_elasticsearch_index! submit_search('Test') select_search_scope('Issues') @@ -25,8 +26,9 @@ expect(page).to have_selector('.results', text: 'Test searching for an issue') end - it 'finds merge requests', :sidekiq_might_not_need_inline do + it 'finds merge requests', :sidekiq_inline do create(:merge_request, source_project: project, target_project: project, title: 'Test searching for an MR') + ensure_elasticsearch_index! 
submit_search('Test') select_search_scope('Merge requests') @@ -34,8 +36,9 @@ expect(page).to have_selector('.results', text: 'Test searching for an MR') end - it 'finds milestones', :sidekiq_might_not_need_inline do + it 'finds milestones', :sidekiq_inline do create(:milestone, project: project, title: 'Test searching for a milestone') + ensure_elasticsearch_index! submit_search('Test') select_search_scope('Milestones') @@ -43,7 +46,7 @@ expect(page).to have_selector('.results', text: 'Test searching for a milestone') end - it 'finds wiki pages', :sidekiq_might_not_need_inline do + it 'finds wiki pages', :sidekiq_inline do project.wiki.create_page('test.md', 'Test searching for a wiki page') project.wiki.index_wiki_blobs @@ -53,8 +56,9 @@ expect(page).to have_selector('.results', text: 'Test searching for a wiki page') end - it 'finds notes', :sidekiq_might_not_need_inline do + it 'finds notes', :sidekiq_inline do create(:note, project: project, note: 'Test searching for a comment') + ensure_elasticsearch_index! 
submit_search('Test') select_search_scope('Comments') @@ -62,7 +66,7 @@ expect(page).to have_selector('.results', text: 'Test searching for a comment') end - it 'finds commits', :sidekiq_might_not_need_inline do + it 'finds commits', :sidekiq_inline do project.repository.index_commits_and_blobs submit_search('initial') @@ -71,7 +75,7 @@ expect(page).to have_selector('.results', text: 'Initial commit') end - it 'finds blobs', :sidekiq_might_not_need_inline do + it 'finds blobs', :sidekiq_inline do project.repository.index_commits_and_blobs submit_search('def') diff --git a/ee/spec/frontend/analytics/cycle_analytics/components/base_spec.js b/ee/spec/frontend/analytics/cycle_analytics/components/base_spec.js index 04804a8003df05aa6abc66c45a5b98c047723908..2c376b6cc7d7ab66f4963a49611177f59aac4275 100644 --- a/ee/spec/frontend/analytics/cycle_analytics/components/base_spec.js +++ b/ee/spec/frontend/analytics/cycle_analytics/components/base_spec.js @@ -22,6 +22,7 @@ import * as mockData from '../mock_data'; const noDataSvgPath = 'path/to/no/data'; const noAccessSvgPath = 'path/to/no/access'; const emptyStateSvgPath = 'path/to/empty/state'; +const hideGroupDropDown = false; const localVue = createLocalVue(); localVue.use(Vuex); @@ -40,6 +41,7 @@ function createComponent({ scatterplotEnabled = true, tasksByTypeChartEnabled = true, customizableCycleAnalyticsEnabled = false, + props = {}, } = {}) { const func = shallow ? 
shallowMount : mount; const comp = func(Component, { @@ -50,6 +52,8 @@ function createComponent({ noDataSvgPath, noAccessSvgPath, baseStagesEndpoint: mockData.endpoints.baseStagesEndpoint, + hideGroupDropDown, + ...props, }, provide: { glFeatures: { @@ -165,6 +169,21 @@ describe('Cycle Analytics component', () => { it('does not display the duration scatter plot', () => { displaysDurationScatterPlot(false); }); + + describe('hideGroupDropDown = true', () => { + beforeEach(() => { + mock = new MockAdapter(axios); + wrapper = createComponent({ + props: { + hideGroupDropDown: true, + }, + }); + }); + + it('does not render the group dropdown', () => { + expect(wrapper.find(GroupsDropdownFilter).exists()).toBe(false); + }); + }); }); describe('after a filter has been selected', () => { diff --git a/ee/spec/frontend/boards/components/board_list_selector/assignees_list_item_spec.js b/ee/spec/frontend/boards/components/board_list_selector/assignees_list_item_spec.js index 043766c931850a58fae77131bb3a7040cb1e1668..84835b078194a6c1cf46a857fb34d41389386a8a 100644 --- a/ee/spec/frontend/boards/components/board_list_selector/assignees_list_item_spec.js +++ b/ee/spec/frontend/boards/components/board_list_selector/assignees_list_item_spec.js @@ -1,68 +1,52 @@ -import Vue from 'vue'; - +import { shallowMount } from '@vue/test-utils'; import AssigneesListItem from 'ee/boards/components/boards_list_selector/assignees_list_item.vue'; -import mountComponent from 'helpers/vue_mount_component_helper'; - import { mockAssigneesList } from 'jest/boards/mock_data'; -const createComponent = () => { - const Component = Vue.extend(AssigneesListItem); - - return mountComponent(Component, { - item: mockAssigneesList[0], - }); -}; - describe('AssigneesListItem', () => { - let vm; + const assignee = mockAssigneesList[0]; + let wrapper; beforeEach(() => { - vm = createComponent(); + wrapper = shallowMount(AssigneesListItem, { + propsData: { + item: assignee, + }, + }); }); afterEach(() => { - 
vm.$destroy(); + wrapper.destroy(); }); - describe('computed', () => { - describe('avatarAltText', () => { - it('returns computed alt text based on assignee.name', () => { - expect(vm.avatarAltText).toBe(`${mockAssigneesList[0].name}'s avatar`); - }); - }); + it('renders component container element with class `filter-dropdown-item`', () => { + expect(wrapper.contains('.filter-dropdown-item')).toBe(true); }); - describe('methods', () => { - describe('handleItemClick', () => { - it('emits `onItemSelect` event on component and sends `assignee` as event param', () => { - jest.spyOn(vm, '$emit'); - const assignee = mockAssigneesList[0]; - - vm.handleItemClick(); + it('emits `onItemSelect` event on component click and sends `assignee` as event param', () => { + wrapper.find('.filter-dropdown-item').trigger('click'); - expect(vm.$emit).toHaveBeenCalledWith('onItemSelect', assignee); - }); - }); + expect(wrapper.emitted().onItemSelect[0]).toEqual([assignee]); }); - describe('template', () => { - it('renders component container element with class `filter-dropdown-item`', () => { - expect(vm.$el.classList.contains('filter-dropdown-item')).toBe(true); + describe('avatar', () => { + it('has alt text', () => { + expect(wrapper.find('.avatar').attributes('alt')).toBe(`${assignee.name}'s avatar`); }); - it('renders user item button element', () => { - const assignee = mockAssigneesList[0]; - const buttonEl = vm.$el.querySelector('.dropdown-user'); + it('has src url', () => { + expect(wrapper.find('.avatar').attributes('src')).toBe(assignee.avatar_url); + }); + }); - expect(buttonEl).not.toBeNull(); - expect( - buttonEl.querySelector('.avatar-container.s32 img.avatar.s32').getAttribute('src'), - ).toBe(assignee.avatar_url); + describe('user details', () => { + it('shows assignee name', () => { + expect(wrapper.find('.dropdown-user-details').text()).toContain(assignee.name); + }); - expect(buttonEl.querySelector('.dropdown-user-details').innerText).toContain(assignee.name); - 
expect( - buttonEl.querySelector('.dropdown-user-details .dropdown-light-content').innerText, - ).toContain(`@${assignee.username}`); + it('shows assignee username', () => { + expect(wrapper.find('.dropdown-user-details .dropdown-light-content').text()).toContain( + `@${assignee.username}`, + ); }); }); }); diff --git a/ee/spec/frontend/boards/components/board_list_selector/list_container_spec.js b/ee/spec/frontend/boards/components/board_list_selector/list_container_spec.js index 471bc005db8fdc4a27e29875c3ca891c40a85ae5..3351ddc0a70d849b477383c760eae6068ef6bc2d 100644 --- a/ee/spec/frontend/boards/components/board_list_selector/list_container_spec.js +++ b/ee/spec/frontend/boards/components/board_list_selector/list_container_spec.js @@ -1,55 +1,51 @@ +import { shallowMount } from '@vue/test-utils'; import Vue from 'vue'; - import ListContainer from 'ee/boards/components/boards_list_selector/list_container.vue'; -import mountComponent from 'helpers/vue_mount_component_helper'; - +import ListFilter from 'ee/boards/components/boards_list_selector/list_filter.vue'; +import ListContent from 'ee/boards/components/boards_list_selector/list_content.vue'; import { mockAssigneesList } from 'jest/boards/mock_data'; -const createComponent = () => { - const Component = Vue.extend(ListContainer); - - return mountComponent(Component, { - loading: false, - items: mockAssigneesList, - listType: 'assignees', - }); -}; - describe('ListContainer', () => { - let vm; + let wrapper; beforeEach(() => { - vm = createComponent(); + wrapper = shallowMount(ListContainer, { + propsData: { + loading: false, + items: mockAssigneesList, + listType: 'assignees', + }, + }); }); afterEach(() => { - vm.$destroy(); + wrapper.destroy(); }); describe('computed', () => { describe('filteredItems', () => { it('returns assignees list as it is when `query` is empty', () => { - vm.query = ''; + wrapper.setData({ query: '' }); - expect(vm.filteredItems.length).toBe(mockAssigneesList.length); + 
expect(wrapper.vm.filteredItems.length).toBe(mockAssigneesList.length); }); it('returns filtered assignees list as it is when `query` has name', () => { const assignee = mockAssigneesList[0]; - vm.query = assignee.name; + wrapper.setData({ query: assignee.name }); - expect(vm.filteredItems.length).toBe(1); - expect(vm.filteredItems[0].name).toBe(assignee.name); + expect(wrapper.vm.filteredItems.length).toBe(1); + expect(wrapper.vm.filteredItems[0].name).toBe(assignee.name); }); it('returns filtered assignees list as it is when `query` has username', () => { const assignee = mockAssigneesList[0]; - vm.query = assignee.username; + wrapper.setData({ query: assignee.username }); - expect(vm.filteredItems.length).toBe(1); - expect(vm.filteredItems[0].username).toBe(assignee.username); + expect(wrapper.vm.filteredItems.length).toBe(1); + expect(wrapper.vm.filteredItems[0].username).toBe(assignee.username); }); }); }); @@ -58,39 +54,39 @@ describe('ListContainer', () => { describe('handleSearch', () => { it('sets value of param `query` to component prop `query`', () => { const query = 'foobar'; - vm.handleSearch(query); + wrapper.vm.handleSearch(query); - expect(vm.query).toBe(query); + expect(wrapper.vm.query).toBe(query); }); }); describe('handleItemClick', () => { it('emits `onItemSelect` event on component and sends `assignee` as event param', () => { - jest.spyOn(vm, '$emit'); const assignee = mockAssigneesList[0]; - vm.handleItemClick(assignee); + wrapper.vm.handleItemClick(assignee); - expect(vm.$emit).toHaveBeenCalledWith('onItemSelect', assignee); + expect(wrapper.emitted().onItemSelect[0]).toEqual([assignee]); }); }); }); describe('template', () => { it('renders component container element with class `dropdown-assignees-list`', () => { - expect(vm.$el.classList.contains('dropdown-assignees-list')).toBe(true); + expect(wrapper.classes('dropdown-assignees-list')).toBe(true); }); it('renders loading animation when prop `loading` is true', () => { - vm.loading = 
true; + wrapper.setProps({ loading: true }); + return Vue.nextTick().then(() => { - expect(vm.$el.querySelector('.dropdown-loading')).not.toBeNull(); + expect(wrapper.find('.dropdown-loading').exists()).toBe(true); }); }); it('renders dropdown body elements', () => { - expect(vm.$el.querySelector('.dropdown-input')).not.toBeNull(); - expect(vm.$el.querySelector('.dropdown-content')).not.toBeNull(); + expect(wrapper.find(ListFilter).exists()).toBe(true); + expect(wrapper.find(ListContent).exists()).toBe(true); }); }); }); diff --git a/ee/spec/frontend/boards/components/board_list_selector/list_content_spec.js b/ee/spec/frontend/boards/components/board_list_selector/list_content_spec.js index 7b23abb2263b59ca0f30a36d1caca7b0128c59a8..2ffe7cfddd450e2db0748b30fa15e66aaef998cd 100644 --- a/ee/spec/frontend/boards/components/board_list_selector/list_content_spec.js +++ b/ee/spec/frontend/boards/components/board_list_selector/list_content_spec.js @@ -1,50 +1,36 @@ -import Vue from 'vue'; - +import { shallowMount } from '@vue/test-utils'; import ListContent from 'ee/boards/components/boards_list_selector/list_content.vue'; -import mountComponent from 'helpers/vue_mount_component_helper'; - import { mockAssigneesList } from 'jest/boards/mock_data'; -const createComponent = () => { - const Component = Vue.extend(ListContent); - - return mountComponent(Component, { - items: mockAssigneesList, - listType: 'assignees', - }); -}; - describe('ListContent', () => { - let vm; + let wrapper; beforeEach(() => { - vm = createComponent(); + wrapper = shallowMount(ListContent, { + propsData: { + items: mockAssigneesList, + listType: 'assignees', + }, + }); }); afterEach(() => { - vm.$destroy(); + wrapper.destroy(); }); - describe('methods', () => { - describe('handleItemClick', () => { - it('emits `onItemSelect` event on component and sends `assignee` as event param', () => { - jest.spyOn(vm, '$emit'); - const assignee = mockAssigneesList[0]; + it('emits `onItemSelect` event on 
component and sends `assignee` as event param', () => { + const assignee = mockAssigneesList[0]; - vm.handleItemClick(assignee); + wrapper.vm.handleItemClick(assignee); - expect(vm.$emit).toHaveBeenCalledWith('onItemSelect', assignee); - }); - }); + expect(wrapper.emitted().onItemSelect[0]).toEqual([assignee]); }); - describe('template', () => { - it('renders component container element with class `dropdown-content`', () => { - expect(vm.$el.classList.contains('dropdown-content')).toBe(true); - }); + it('renders component container element with class `dropdown-content`', () => { + expect(wrapper.classes('dropdown-content')).toBe(true); + }); - it('renders UL parent element as child within container', () => { - expect(vm.$el.querySelector('ul')).not.toBeNull(); - }); + it('renders UL parent element as child within container', () => { + expect(wrapper.find('ul').exists()).toBe(true); }); }); diff --git a/ee/spec/frontend/boards/components/board_list_selector/list_filter_spec.js b/ee/spec/frontend/boards/components/board_list_selector/list_filter_spec.js index 705f63ad1ab0509249fb4ca7cf6f0988c9b4fb14..3a6588aa4d267a8584e7f4b418b64b73796b24d6 100644 --- a/ee/spec/frontend/boards/components/board_list_selector/list_filter_spec.js +++ b/ee/spec/frontend/boards/components/board_list_selector/list_filter_spec.js @@ -1,80 +1,81 @@ +import { shallowMount } from '@vue/test-utils'; import Vue from 'vue'; - import ListFilter from 'ee/boards/components/boards_list_selector/list_filter.vue'; -import mountComponent from 'helpers/vue_mount_component_helper'; - -const createComponent = () => { - const Component = Vue.extend(ListFilter); - - return mountComponent(Component); -}; describe('ListFilter', () => { - let vm; + let wrapper; beforeEach(() => { - vm = createComponent(); + wrapper = shallowMount(ListFilter); }); afterEach(() => { - vm.$destroy(); + wrapper.destroy(); }); - describe('methods', () => { - describe('handleInputChange', () => { - it('emits `onSearchInput` event on 
component and sends `query` as event param', () => { - jest.spyOn(vm, '$emit'); - const query = 'foobar'; - vm.query = query; + describe('input field', () => { + it('emits `onSearchInput` event on keyup and sends input text as event param', () => { + const input = wrapper.find('input'); + input.setValue('foobar'); + input.trigger('keyup'); + + expect(wrapper.emitted().onSearchInput[0]).toEqual(['foobar']); + }); + }); - vm.handleInputChange(); + describe('clear button', () => { + let input; - expect(vm.$emit).toHaveBeenCalledWith('onSearchInput', query); - }); + beforeEach(() => { + // Pre-populate input field with text + input = wrapper.find('input'); + input.setValue('foobar'); + input.trigger('keyup'); }); - describe('handleInputClear', () => { - it('clears value of prop `query` and calls `handleInputChange` method on component', () => { - jest.spyOn(vm, 'handleInputChange'); - vm.query = 'foobar'; + it('clears input field and emits `onSearchInput` event with empty value', () => { + expect(input.element.value).toBe('foobar'); - vm.handleInputClear(); + wrapper.find('.dropdown-input-clear').trigger('click'); - expect(vm.query).toBe(''); - expect(vm.handleInputChange).toHaveBeenCalled(); + return Vue.nextTick().then(() => { + expect(input.element.value).toBe(''); + expect(wrapper.emitted().onSearchInput[1]).toEqual(['']); }); }); }); describe('template', () => { it('renders component container element with class `dropdown-input`', () => { - expect(vm.$el.classList.contains('dropdown-input')).toBe(true); + expect(wrapper.classes('dropdown-input')).toBe(true); }); it('renders class `has-value` on container element when prop `query` is not empty', () => { - vm.query = 'foobar'; + wrapper.setData({ query: 'foobar' }); + return Vue.nextTick().then(() => { - expect(vm.$el.classList.contains('has-value')).toBe(true); + expect(wrapper.classes('has-value')).toBe(true); }); }); it('removes class `has-value` from container element when prop `query` is empty', () => { - 
vm.query = ''; + wrapper.setData({ query: '' }); + return Vue.nextTick().then(() => { - expect(vm.$el.classList.contains('has-value')).toBe(false); + expect(wrapper.classes('has-value')).toBe(false); }); }); it('renders search input element', () => { - const inputEl = vm.$el.querySelector('input.dropdown-input-field'); + const inputEl = wrapper.find('input.dropdown-input-field'); - expect(inputEl).not.toBeNull(); - expect(inputEl.getAttribute('placeholder')).toBe('Search'); + expect(inputEl.exists()).toBe(true); + expect(inputEl.attributes('placeholder')).toBe('Search'); }); it('renders search input icons', () => { - expect(vm.$el.querySelector('i.fa.fa-search.dropdown-input-search')).not.toBeNull(); - expect(vm.$el.querySelector('i.fa.fa-times.dropdown-input-clear')).not.toBeNull(); + expect(wrapper.find('i.fa.fa-search.dropdown-input-search').exists()).toBe(true); + expect(wrapper.find('i.fa.fa-times.dropdown-input-clear').exists()).toBe(true); }); }); }); diff --git a/ee/spec/frontend/boards/components/issue_card_weight_spec.js b/ee/spec/frontend/boards/components/issue_card_weight_spec.js index 6a5e0e9b5c29a37d4847f35151fc92e973a33cd6..71c5ac0f7543e375a870d09a5618f22749216174 100644 --- a/ee/spec/frontend/boards/components/issue_card_weight_spec.js +++ b/ee/spec/frontend/boards/components/issue_card_weight_spec.js @@ -1,59 +1,53 @@ -import Vue from 'vue'; +import { shallowMount } from '@vue/test-utils'; import IssueCardWeight from 'ee/boards/components/issue_card_weight.vue'; -import mountComponent from 'helpers/vue_mount_component_helper'; -describe('IssueCardWeight component', () => { - let vm; - let Component; - - beforeAll(() => { - Component = Vue.extend(IssueCardWeight); +function mountIssueCardWeight(propsData) { + return shallowMount(IssueCardWeight, { + propsData, }); +} + +describe('IssueCardWeight', () => { + let wrapper; afterEach(() => { - vm.$destroy(); + wrapper.destroy(); }); - it('renders weight', () => { - vm = mountComponent(Component, { - 
weight: 5, + describe('weight text', () => { + it('shows 0 when weight is 0', () => { + wrapper = mountIssueCardWeight({ + weight: 0, + }); + + expect(wrapper.find('.board-card-info-text').text()).toContain(0); }); - expect(vm.$el.querySelector('.board-card-info-text').innerText).toContain('5'); + it('shows 5 when weight is 5', () => { + wrapper = mountIssueCardWeight({ + weight: 5, + }); + + expect(wrapper.find('.board-card-info-text').text()).toContain('5'); + }); }); it('renders a link when no tag is specified', () => { - vm = mountComponent(Component, { + wrapper = mountIssueCardWeight({ weight: 2, }); - expect(vm.$el.querySelector('a.board-card-info')).toBeDefined(); + expect(wrapper.find('span.board-card-info').exists()).toBe(false); + expect(wrapper.find('a.board-card-info').exists()).toBe(true); }); it('renders the tag when it is explicitly specified', () => { - vm = mountComponent(Component, { + wrapper = mountIssueCardWeight({ weight: 2, tagName: 'span', }); - expect(vm.$el.querySelector('span.board-card-info')).toBeDefined(); - expect(vm.$el.querySelector('a.board-card-info')).toBeNull(); - }); - - describe('with weight=0', () => { - beforeEach(() => { - vm = mountComponent(Component, { - weight: 0, - }); - }); - - afterEach(() => { - vm.$destroy(); - }); - - it('renders weight', () => { - expect(vm.$el.querySelector('.board-card-info-text')).toBeDefined(); - expect(vm.$el.querySelector('.board-card-info-text').innerText).toContain(0); - }); + expect(wrapper.find('span.board-card-info').exists()).toBe(true); + expect(wrapper.find('a.board-card-info').exists()).toBe(false); }); }); diff --git a/ee/spec/frontend/geo_node_form/components/geo_node_form_capacities_spec.js b/ee/spec/frontend/geo_node_form/components/geo_node_form_capacities_spec.js index 4012f605f3f99e58826587489dd22a27691a2fd9..a136c0a2fc1783dd3fcb2b4ff36053d6f8344270 100644 --- a/ee/spec/frontend/geo_node_form/components/geo_node_form_capacities_spec.js +++ 
b/ee/spec/frontend/geo_node_form/components/geo_node_form_capacities_spec.js @@ -22,17 +22,17 @@ describe('GeoNodeFormCapacities', () => { const findGeoNodeFormRepositoryCapacityField = () => wrapper.find('#node-repository-capacity-field'); const findGeoNodeFormFileCapacityField = () => wrapper.find('#node-file-capacity-field'); - const findGeoNodeFormVerificationCapacityField = () => - wrapper.find('#node-verification-capacity-field'); const findGeoNodeFormContainerRepositoryCapacityField = () => wrapper.find('#node-container-repository-capacity-field'); + const findGeoNodeFormVerificationCapacityField = () => + wrapper.find('#node-verification-capacity-field'); const findGeoNodeFormReverificationIntervalField = () => wrapper.find('#node-reverification-interval-field'); describe('template', () => { describe.each` primaryNode | showRepoCapacity | showFileCapacity | showVerificationCapacity | showContainerCapacity | showReverificationInterval - ${true} | ${false} | ${false} | ${true} | ${true} | ${true} + ${true} | ${false} | ${false} | ${true} | ${false} | ${true} ${false} | ${true} | ${true} | ${true} | ${true} | ${false} `( `conditional fields`, @@ -40,8 +40,8 @@ describe('GeoNodeFormCapacities', () => { primaryNode, showRepoCapacity, showFileCapacity, - showVerificationCapacity, showContainerCapacity, + showVerificationCapacity, showReverificationInterval, }) => { beforeEach(() => { @@ -57,14 +57,6 @@ describe('GeoNodeFormCapacities', () => { expect(findGeoNodeFormFileCapacityField().exists()).toBe(showFileCapacity); }); - it(`it ${ - showVerificationCapacity ? 'shows' : 'hides' - } the Verification Capacity Field`, () => { - expect(findGeoNodeFormVerificationCapacityField().exists()).toBe( - showVerificationCapacity, - ); - }); - it(`it ${ showContainerCapacity ? 'shows' : 'hides' } the Container Repository Capacity Field`, () => { @@ -73,6 +65,14 @@ describe('GeoNodeFormCapacities', () => { ); }); + it(`it ${ + showVerificationCapacity ? 
'shows' : 'hides' + } the Verification Capacity Field`, () => { + expect(findGeoNodeFormVerificationCapacityField().exists()).toBe( + showVerificationCapacity, + ); + }); + it(`it ${ showReverificationInterval ? 'shows' : 'hides' } the Reverification Interval Field`, () => { diff --git a/ee/spec/javascripts/issuable/related_issues/components/issue_token_spec.js b/ee/spec/frontend/issuable/related_issues/components/issue_token_spec.js similarity index 98% rename from ee/spec/javascripts/issuable/related_issues/components/issue_token_spec.js rename to ee/spec/frontend/issuable/related_issues/components/issue_token_spec.js index 427d44a5147c772744bd6c0cf90d1f2dd767d10b..cd443b6243061f775120792f12a9c58ec3f946c4 100644 --- a/ee/spec/javascripts/issuable/related_issues/components/issue_token_spec.js +++ b/ee/spec/frontend/issuable/related_issues/components/issue_token_spec.js @@ -212,7 +212,7 @@ describe('IssueToken', () => { }); it('when getting checked', () => { - spyOn(vm, '$emit'); + jest.spyOn(vm, '$emit').mockImplementation(() => {}); vm.onRemoveRequest(); expect(vm.$emit).toHaveBeenCalledWith('pendingIssuableRemoveRequest', vm.idKey); diff --git a/ee/spec/javascripts/issuable/related_issues/components/related_issues_block_spec.js b/ee/spec/frontend/issuable/related_issues/components/related_issues_block_spec.js similarity index 98% rename from ee/spec/javascripts/issuable/related_issues/components/related_issues_block_spec.js rename to ee/spec/frontend/issuable/related_issues/components/related_issues_block_spec.js index 2efb657bb4e511e16059087c8516dd1149cdef32..e16793012cdcd6bf5f25aa6aae45b905f3d16ddf 100644 --- a/ee/spec/javascripts/issuable/related_issues/components/related_issues_block_spec.js +++ b/ee/spec/frontend/issuable/related_issues/components/related_issues_block_spec.js @@ -4,7 +4,7 @@ import { issuable1, issuable2, issuable3, -} from 'spec/vue_shared/components/issue/related_issuable_mock_data'; +} from 
'jest/vue_shared/components/issue/related_issuable_mock_data'; import { linkedIssueTypesMap, linkedIssueTypesTextMap, diff --git a/ee/spec/javascripts/issuable/related_issues/components/related_issues_list_spec.js b/ee/spec/frontend/issuable/related_issues/components/related_issues_list_spec.js similarity index 93% rename from ee/spec/javascripts/issuable/related_issues/components/related_issues_list_spec.js rename to ee/spec/frontend/issuable/related_issues/components/related_issues_list_spec.js index 9bb5fdcc275598f61da4f8e8ebaf26a4977dbbba..73c10df4520a0566624cbdb731ee0efec5bf30b3 100644 --- a/ee/spec/javascripts/issuable/related_issues/components/related_issues_list_spec.js +++ b/ee/spec/frontend/issuable/related_issues/components/related_issues_list_spec.js @@ -8,7 +8,7 @@ import { issuable3, issuable4, issuable5, -} from 'spec/vue_shared/components/issue/related_issuable_mock_data'; +} from 'jest/vue_shared/components/issue/related_issuable_mock_data'; import { PathIdSeparator } from 'ee/related_issues/constants'; describe('RelatedIssuesList', () => { @@ -180,11 +180,21 @@ describe('RelatedIssuesList', () => { }); it('shows weight', () => { - expect(wrapper.find(IssueWeight).text()).toBe(issuable1.weight.toString()); + expect( + wrapper + .find(IssueWeight) + .find('.board-card-info-text') + .text(), + ).toBe(issuable1.weight.toString()); }); it('shows due date', () => { - expect(wrapper.find(IssueDueDate).text()).toBe('Nov 22, 2010'); + expect( + wrapper + .find(IssueDueDate) + .find('.board-card-info-text') + .text(), + ).toBe('Nov 22, 2010'); }); }); }); diff --git a/ee/spec/frontend/issuable/related_issues/components/related_issues_root_spec.js b/ee/spec/frontend/issuable/related_issues/components/related_issues_root_spec.js new file mode 100644 index 0000000000000000000000000000000000000000..d56e7d81c2ba4564bcb931dd6a1f3dafb45b92d1 --- /dev/null +++ b/ee/spec/frontend/issuable/related_issues/components/related_issues_root_spec.js @@ -0,0 +1,341 @@ 
+import { mount, shallowMount } from '@vue/test-utils'; +import MockAdapter from 'axios-mock-adapter'; +import waitForPromises from 'helpers/wait_for_promises'; +import RelatedIssuesRoot from 'ee/related_issues/components/related_issues_root.vue'; +import relatedIssuesService from 'ee/related_issues/services/related_issues_service'; +import { linkedIssueTypesMap } from 'ee/related_issues/constants'; +import { + defaultProps, + issuable1, + issuable2, +} from 'jest/vue_shared/components/issue/related_issuable_mock_data'; +import axios from '~/lib/utils/axios_utils'; +import createFlash from '~/flash'; + +jest.mock('~/flash'); + +describe('RelatedIssuesRoot', () => { + let wrapper; + let mock; + + beforeEach(() => { + mock = new MockAdapter(axios); + mock.onGet(defaultProps.endpoint).reply(200, []); + }); + + afterEach(() => { + mock.restore(); + if (wrapper) { + wrapper.destroy(); + wrapper = null; + } + }); + + const createComponent = (mountFn = mount) => { + wrapper = mountFn(RelatedIssuesRoot, { + propsData: defaultProps, + }); + + // Wait for fetch request `fetchRelatedIssues` to complete before starting to test + return waitForPromises(); + }; + + describe('methods', () => { + describe('onRelatedIssueRemoveRequest', () => { + beforeEach(() => { + jest + .spyOn(relatedIssuesService.prototype, 'fetchRelatedIssues') + .mockReturnValue(Promise.reject()); + + return createComponent().then(() => { + wrapper.vm.store.setRelatedIssues([issuable1]); + }); + }); + + it('remove related issue and succeeds', () => { + mock.onDelete(issuable1.referencePath).reply(200, { issues: [] }); + + wrapper.vm.onRelatedIssueRemoveRequest(issuable1.id); + + return axios.waitForAll().then(() => { + expect(wrapper.vm.state.relatedIssues).toEqual([]); + }); + }); + + it('remove related issue, fails, and restores to related issues', () => { + mock.onDelete(issuable1.referencePath).reply(422, {}); + + wrapper.vm.onRelatedIssueRemoveRequest(issuable1.id); + + return axios.waitForAll().then(() 
=> { + expect(wrapper.vm.state.relatedIssues.length).toEqual(1); + expect(wrapper.vm.state.relatedIssues[0].id).toEqual(issuable1.id); + }); + }); + }); + + describe('onToggleAddRelatedIssuesForm', () => { + beforeEach(() => createComponent(shallowMount)); + + it('toggle related issues form to visible', () => { + wrapper.vm.onToggleAddRelatedIssuesForm(); + + expect(wrapper.vm.isFormVisible).toEqual(true); + }); + + it('show add related issues form to hidden', () => { + wrapper.vm.isFormVisible = true; + + wrapper.vm.onToggleAddRelatedIssuesForm(); + + expect(wrapper.vm.isFormVisible).toEqual(false); + }); + }); + + describe('onPendingIssueRemoveRequest', () => { + beforeEach(() => + createComponent().then(() => { + wrapper.vm.store.setPendingReferences([issuable1.reference]); + }), + ); + + it('remove pending related issue', () => { + expect(wrapper.vm.state.pendingReferences.length).toEqual(1); + + wrapper.vm.onPendingIssueRemoveRequest(0); + + expect(wrapper.vm.state.pendingReferences.length).toEqual(0); + }); + }); + + describe('onPendingFormSubmit', () => { + beforeEach(() => { + jest + .spyOn(relatedIssuesService.prototype, 'fetchRelatedIssues') + .mockReturnValue(Promise.reject()); + + return createComponent().then(() => { + jest.spyOn(wrapper.vm, 'processAllReferences'); + jest.spyOn(wrapper.vm.service, 'addRelatedIssues'); + createFlash.mockClear(); + }); + }); + + it('processes references before submitting', () => { + const input = '#123'; + const linkedIssueType = linkedIssueTypesMap.RELATES_TO; + const emitObj = { + pendingReferences: input, + linkedIssueType, + }; + + wrapper.vm.onPendingFormSubmit(emitObj); + + expect(wrapper.vm.processAllReferences).toHaveBeenCalledWith(input); + expect(wrapper.vm.service.addRelatedIssues).toHaveBeenCalledWith([input], linkedIssueType); + }); + + it('submit zero pending issue as related issue', () => { + wrapper.vm.store.setPendingReferences([]); + wrapper.vm.onPendingFormSubmit({}); + + return 
waitForPromises().then(() => { + expect(wrapper.vm.state.pendingReferences.length).toEqual(0); + expect(wrapper.vm.state.relatedIssues.length).toEqual(0); + }); + }); + + it('submit pending issue as related issue', () => { + mock.onPost(defaultProps.endpoint).reply(200, { + issuables: [issuable1], + result: { + message: 'something was successfully related', + status: 'success', + }, + }); + + wrapper.vm.store.setPendingReferences([issuable1.reference]); + wrapper.vm.onPendingFormSubmit({}); + + return waitForPromises().then(() => { + expect(wrapper.vm.state.pendingReferences.length).toEqual(0); + expect(wrapper.vm.state.relatedIssues.length).toEqual(1); + expect(wrapper.vm.state.relatedIssues[0].id).toEqual(issuable1.id); + }); + }); + + it('submit multiple pending issues as related issues', () => { + mock.onPost(defaultProps.endpoint).reply(200, { + issuables: [issuable1, issuable2], + result: { + message: 'something was successfully related', + status: 'success', + }, + }); + + wrapper.vm.store.setPendingReferences([issuable1.reference, issuable2.reference]); + wrapper.vm.onPendingFormSubmit({}); + + return waitForPromises().then(() => { + expect(wrapper.vm.state.pendingReferences.length).toEqual(0); + expect(wrapper.vm.state.relatedIssues.length).toEqual(2); + expect(wrapper.vm.state.relatedIssues[0].id).toEqual(issuable1.id); + expect(wrapper.vm.state.relatedIssues[1].id).toEqual(issuable2.id); + }); + }); + + it('displays a message from the backend upon error', () => { + const input = '#123'; + const message = 'error'; + + mock.onPost(defaultProps.endpoint).reply(409, { message }); + wrapper.vm.store.setPendingReferences([issuable1.reference, issuable2.reference]); + + expect(createFlash).not.toHaveBeenCalled(); + wrapper.vm.onPendingFormSubmit(input); + + return waitForPromises().then(() => { + expect(createFlash).toHaveBeenCalledWith(message); + }); + }); + }); + + describe('onPendingFormCancel', () => { + beforeEach(() => + createComponent().then(() => { + 
wrapper.vm.isFormVisible = true; + wrapper.vm.inputValue = 'foo'; + }), + ); + + it('when canceling and hiding add issuable form', () => { + wrapper.vm.onPendingFormCancel(); + + return wrapper.vm.$nextTick().then(() => { + expect(wrapper.vm.isFormVisible).toEqual(false); + expect(wrapper.vm.inputValue).toEqual(''); + expect(wrapper.vm.state.pendingReferences.length).toEqual(0); + }); + }); + }); + + describe('fetchRelatedIssues', () => { + beforeEach(() => createComponent()); + + it('sets isFetching while fetching', () => { + wrapper.vm.fetchRelatedIssues(); + + expect(wrapper.vm.isFetching).toEqual(true); + + return waitForPromises().then(() => { + expect(wrapper.vm.isFetching).toEqual(false); + }); + }); + + it('should fetch related issues', () => { + mock.onGet(defaultProps.endpoint).reply(200, [issuable1, issuable2]); + + wrapper.vm.fetchRelatedIssues(); + + return waitForPromises().then(() => { + expect(wrapper.vm.state.relatedIssues.length).toEqual(2); + expect(wrapper.vm.state.relatedIssues[0].id).toEqual(issuable1.id); + expect(wrapper.vm.state.relatedIssues[1].id).toEqual(issuable2.id); + }); + }); + }); + + describe('onInput', () => { + beforeEach(() => createComponent()); + + it('fill in issue number reference and adds to pending related issues', () => { + const input = '#123 '; + wrapper.vm.onInput({ + untouchedRawReferences: [input.trim()], + touchedReference: input, + }); + + expect(wrapper.vm.state.pendingReferences.length).toEqual(1); + expect(wrapper.vm.state.pendingReferences[0]).toEqual('#123'); + }); + + it('fill in with full reference', () => { + const input = 'asdf/qwer#444 '; + wrapper.vm.onInput({ untouchedRawReferences: [input.trim()], touchedReference: input }); + + expect(wrapper.vm.state.pendingReferences.length).toEqual(1); + expect(wrapper.vm.state.pendingReferences[0]).toEqual('asdf/qwer#444'); + }); + + it('fill in with issue link', () => { + const link = 'http://localhost:3000/foo/bar/issues/111'; + const input = `${link} `; + 
wrapper.vm.onInput({ untouchedRawReferences: [input.trim()], touchedReference: input }); + + expect(wrapper.vm.state.pendingReferences.length).toEqual(1); + expect(wrapper.vm.state.pendingReferences[0]).toEqual(link); + }); + + it('fill in with multiple references', () => { + const input = 'asdf/qwer#444 #12 '; + wrapper.vm.onInput({ + untouchedRawReferences: input.trim().split(/\s/), + touchedReference: 2, + }); + + expect(wrapper.vm.state.pendingReferences.length).toEqual(2); + expect(wrapper.vm.state.pendingReferences[0]).toEqual('asdf/qwer#444'); + expect(wrapper.vm.state.pendingReferences[1]).toEqual('#12'); + }); + + it('fill in with some invalid things', () => { + const input = 'something random '; + wrapper.vm.onInput({ + untouchedRawReferences: input.trim().split(/\s/), + touchedReference: 2, + }); + + expect(wrapper.vm.state.pendingReferences.length).toEqual(2); + expect(wrapper.vm.state.pendingReferences[0]).toEqual('something'); + expect(wrapper.vm.state.pendingReferences[1]).toEqual('random'); + }); + }); + + describe('onBlur', () => { + beforeEach(() => + createComponent().then(() => { + jest.spyOn(wrapper.vm, 'processAllReferences').mockImplementation(() => {}); + }), + ); + + it('add any references to pending when blurring', () => { + const input = '#123'; + + wrapper.vm.onBlur(input); + + expect(wrapper.vm.processAllReferences).toHaveBeenCalledWith(input); + }); + }); + + describe('processAllReferences', () => { + beforeEach(() => createComponent()); + + it('add valid reference to pending', () => { + const input = '#123'; + wrapper.vm.processAllReferences(input); + + expect(wrapper.vm.state.pendingReferences.length).toEqual(1); + expect(wrapper.vm.state.pendingReferences[0]).toEqual('#123'); + }); + + it('add any valid references to pending', () => { + const input = 'asdf #123'; + wrapper.vm.processAllReferences(input); + + expect(wrapper.vm.state.pendingReferences.length).toEqual(2); + expect(wrapper.vm.state.pendingReferences[0]).toEqual('asdf'); 
+ expect(wrapper.vm.state.pendingReferences[1]).toEqual('#123'); + }); + }); + }); +}); diff --git a/ee/spec/helpers/ee/dashboard_helper_spec.rb b/ee/spec/helpers/ee/dashboard_helper_spec.rb index ed5828ac4a26466e27fe0e4e86bb6b298b1a779a..25c7222fc46d2ad5bcd836305afe54dd1e93f095 100644 --- a/ee/spec/helpers/ee/dashboard_helper_spec.rb +++ b/ee/spec/helpers/ee/dashboard_helper_spec.rb @@ -10,6 +10,8 @@ describe '#dashboard_nav_links' do before do allow(helper).to receive(:current_user).and_return(user) + + stub_feature_flags(group_level_cycle_analytics: false) end describe 'analytics' do @@ -237,6 +239,8 @@ def stub_user_permissions_for(feature, enabled) describe 'analytics_nav_url' do before do + stub_feature_flags(group_level_cycle_analytics: false) + allow(helper).to receive(:current_user).and_return(user) end diff --git a/ee/spec/helpers/ee/groups_helper_spec.rb b/ee/spec/helpers/ee/groups_helper_spec.rb index e64eaa75fe0695110e37c0250c07130f81eb434d..c04bddb14deb20998087c4245d972abbb0bebe2d 100644 --- a/ee/spec/helpers/ee/groups_helper_spec.rb +++ b/ee/spec/helpers/ee/groups_helper_spec.rb @@ -3,14 +3,15 @@ require 'spec_helper' describe GroupsHelper do - let(:user) { create(:user, group_view: :security_dashboard) } + let(:owner) { create(:user, group_view: :security_dashboard) } + let(:current_user) { owner } let(:group) { create(:group, :private) } before do - allow(helper).to receive(:current_user) { user } + allow(helper).to receive(:current_user) { current_user } helper.instance_variable_set(:@group, group) - group.add_owner(user) + group.add_owner(owner) end describe '#group_epics_count' do @@ -49,6 +50,21 @@ expect(helper.group_sidebar_links).not_to include(:contribution_analytics, :epics) end + + context 'when contribution analytics is available' do + before do + stub_licensed_features(contribution_analytics: true) + end + + context 'signed in user is a project member but not a member of the group' do + let(:current_user) { create(:user) } + 
let(:private_project) { create(:project, :private, group: group)} + + it 'hides Contribution Analytics' do + expect(helper.group_sidebar_links).not_to include(:contribution_analytics) + end + end + end end describe '#permanent_deletion_date' do @@ -107,10 +123,10 @@ with_them do it 'returns the expected value' do - allow(helper).to receive(:current_user) { user? ? user : nil } + allow(helper).to receive(:current_user) { user? ? owner : nil } allow(::Gitlab).to receive(:com?) { gitlab_com? } - allow(user).to receive(:ab_feature_enabled?) { ab_feature_enabled? } - allow(user).to receive(:created_at) { created_at } + allow(owner).to receive(:ab_feature_enabled?) { ab_feature_enabled? } + allow(owner).to receive(:created_at) { created_at } allow(::Feature).to receive(:enabled?).with(:discover_security) { discover_security_feature_enabled? } allow(group).to receive(:feature_available?) { security_dashboard_feature_available? } allow(helper).to receive(:can?) { can_admin_group? } diff --git a/ee/spec/javascripts/issuable/related_issues/components/related_issues_root_spec.js b/ee/spec/javascripts/issuable/related_issues/components/related_issues_root_spec.js deleted file mode 100644 index 721573a433ef6fd37e4399b31a58ebf1108d624f..0000000000000000000000000000000000000000 --- a/ee/spec/javascripts/issuable/related_issues/components/related_issues_root_spec.js +++ /dev/null @@ -1,367 +0,0 @@ -import Vue from 'vue'; -import MockAdapter from 'axios-mock-adapter'; -import relatedIssuesRoot from 'ee/related_issues/components/related_issues_root.vue'; -import relatedIssuesService from 'ee/related_issues/services/related_issues_service'; -import { linkedIssueTypesMap } from 'ee/related_issues/constants'; -import { - defaultProps, - issuable1, - issuable2, -} from 'spec/vue_shared/components/issue/related_issuable_mock_data'; -import axios from '~/lib/utils/axios_utils'; - -describe('RelatedIssuesRoot', () => { - let RelatedIssuesRoot; - let vm; - let mock; - - beforeEach(() => { - 
RelatedIssuesRoot = Vue.extend(relatedIssuesRoot); - mock = new MockAdapter(axios); - }); - - afterEach(() => { - if (vm) { - vm.$destroy(); - } - mock.restore(); - }); - - describe('methods', () => { - describe('onRelatedIssueRemoveRequest', () => { - beforeEach(done => { - spyOn(relatedIssuesService.prototype, 'fetchRelatedIssues').and.returnValue( - Promise.reject(), - ); - - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - - setTimeout(() => { - vm.store.setRelatedIssues([issuable1]); - done(); - }); - }); - - it('remove related issue and succeeds', done => { - mock.onAny().reply(200, { issues: [] }); - - vm.onRelatedIssueRemoveRequest(issuable1.id); - - setTimeout(() => { - expect(vm.state.relatedIssues).toEqual([]); - - done(); - }); - }); - - it('remove related issue, fails, and restores to related issues', done => { - mock.onAny().reply(422, {}); - - vm.onRelatedIssueRemoveRequest(issuable1.id); - - setTimeout(() => { - expect(vm.state.relatedIssues.length).toEqual(1); - expect(vm.state.relatedIssues[0].id).toEqual(issuable1.id); - - done(); - }); - }); - }); - - describe('onToggleAddRelatedIssuesForm', () => { - beforeEach(() => { - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - }); - - it('toggle related issues form to visible', () => { - vm.onToggleAddRelatedIssuesForm(); - - expect(vm.isFormVisible).toEqual(true); - }); - - it('show add related issues form to hidden', () => { - vm.isFormVisible = true; - - vm.onToggleAddRelatedIssuesForm(); - - expect(vm.isFormVisible).toEqual(false); - }); - }); - - describe('onPendingIssueRemoveRequest', () => { - beforeEach(() => { - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - vm.store.setPendingReferences([issuable1.reference]); - }); - - it('remove pending related issue', () => { - expect(vm.state.pendingReferences.length).toEqual(1); - - vm.onPendingIssueRemoveRequest(0); - - expect(vm.state.pendingReferences.length).toEqual(0); - }); - 
}); - - describe('onPendingFormSubmit', () => { - beforeEach(() => { - spyOn(relatedIssuesService.prototype, 'fetchRelatedIssues').and.returnValue( - Promise.reject(), - ); - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - - spyOn(vm, 'processAllReferences').and.callThrough(); - spyOn(vm.service, 'addRelatedIssues').and.callThrough(); - }); - - it('processes references before submitting', () => { - const input = '#123'; - const linkedIssueType = linkedIssueTypesMap.RELATES_TO; - const emitObj = { - pendingReferences: input, - linkedIssueType, - }; - - vm.onPendingFormSubmit(emitObj); - - expect(vm.processAllReferences).toHaveBeenCalledWith(input); - expect(vm.service.addRelatedIssues).toHaveBeenCalledWith([input], linkedIssueType); - }); - - it('submit zero pending issue as related issue', done => { - vm.store.setPendingReferences([]); - vm.onPendingFormSubmit({}); - - setTimeout(() => { - expect(vm.state.pendingReferences.length).toEqual(0); - expect(vm.state.relatedIssues.length).toEqual(0); - - done(); - }); - }); - - it('submit pending issue as related issue', done => { - mock.onAny().reply(200, { - issuables: [issuable1], - result: { - message: 'something was successfully related', - status: 'success', - }, - }); - - vm.store.setPendingReferences([issuable1.reference]); - vm.onPendingFormSubmit({}); - - setTimeout(() => { - expect(vm.state.pendingReferences.length).toEqual(0); - expect(vm.state.relatedIssues.length).toEqual(1); - expect(vm.state.relatedIssues[0].id).toEqual(issuable1.id); - - done(); - }); - }); - - it('submit multiple pending issues as related issues', done => { - mock.onAny().reply(200, { - issuables: [issuable1, issuable2], - result: { - message: 'something was successfully related', - status: 'success', - }, - }); - - vm.store.setPendingReferences([issuable1.reference, issuable2.reference]); - vm.onPendingFormSubmit({}); - - setTimeout(() => { - expect(vm.state.pendingReferences.length).toEqual(0); - 
expect(vm.state.relatedIssues.length).toEqual(2); - expect(vm.state.relatedIssues[0].id).toEqual(issuable1.id); - expect(vm.state.relatedIssues[1].id).toEqual(issuable2.id); - - done(); - }); - }); - - // https://gitlab.com/gitlab-org/gitlab/issues/38410 - // eslint-disable-next-line jasmine/no-disabled-tests - xit('displays a message from the backend upon error', done => { - const input = '#123'; - const message = 'error'; - - mock.onAny().reply(409, { message }); - document.body.innerHTML += '<div class="flash-container"></div>'; - - vm.onPendingFormSubmit(input); - - setTimeout(() => { - expect(document.querySelector('.flash-text').innerText.trim()).toContain(message); - document.querySelector('.flash-container').remove(); - done(); - }); - }); - }); - - describe('onPendingFormCancel', () => { - beforeEach(() => { - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - vm.isFormVisible = true; - vm.inputValue = 'foo'; - }); - - it('when canceling and hiding add issuable form', () => { - vm.onPendingFormCancel(); - - expect(vm.isFormVisible).toEqual(false); - expect(vm.inputValue).toEqual(''); - expect(vm.state.pendingReferences.length).toEqual(0); - }); - }); - - describe('fetchRelatedIssues', () => { - beforeEach(done => { - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - - mock.onAny().reply(200, [issuable1, issuable2]); - - // wait for internal call to fetchRelatedIssues to resolve - setTimeout(done); - }); - - // https://gitlab.com/gitlab-org/gitlab/issues/207376 - // eslint-disable-next-line jasmine/no-disabled-tests - xit('sets isFetching while fetching', done => { - vm.fetchRelatedIssues(); - - expect(vm.isFetching).toEqual(true); - - setTimeout(() => { - expect(vm.isFetching).toEqual(false); - - done(); - }); - }); - - // https://gitlab.com/gitlab-org/gitlab/issues/207376 - // eslint-disable-next-line jasmine/no-disabled-tests - xit('should fetch related issues', done => { - Vue.nextTick(() => { - 
expect(vm.state.relatedIssues.length).toEqual(2); - expect(vm.state.relatedIssues[0].id).toEqual(issuable1.id); - expect(vm.state.relatedIssues[1].id).toEqual(issuable2.id); - - done(); - }); - }); - }); - - describe('onInput', () => { - beforeEach(() => { - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - }); - - it('fill in issue number reference and adds to pending related issues', () => { - const input = '#123 '; - vm.onInput({ - untouchedRawReferences: [input.trim()], - touchedReference: input, - }); - - expect(vm.state.pendingReferences.length).toEqual(1); - expect(vm.state.pendingReferences[0]).toEqual('#123'); - }); - - it('fill in with full reference', () => { - const input = 'asdf/qwer#444 '; - vm.onInput({ untouchedRawReferences: [input.trim()], touchedReference: input }); - - expect(vm.state.pendingReferences.length).toEqual(1); - expect(vm.state.pendingReferences[0]).toEqual('asdf/qwer#444'); - }); - - it('fill in with issue link', () => { - const link = 'http://localhost:3000/foo/bar/issues/111'; - const input = `${link} `; - vm.onInput({ untouchedRawReferences: [input.trim()], touchedReference: input }); - - expect(vm.state.pendingReferences.length).toEqual(1); - expect(vm.state.pendingReferences[0]).toEqual(link); - }); - - it('fill in with multiple references', () => { - const input = 'asdf/qwer#444 #12 '; - vm.onInput({ untouchedRawReferences: input.trim().split(/\s/), touchedReference: 2 }); - - expect(vm.state.pendingReferences.length).toEqual(2); - expect(vm.state.pendingReferences[0]).toEqual('asdf/qwer#444'); - expect(vm.state.pendingReferences[1]).toEqual('#12'); - }); - - it('fill in with some invalid things', () => { - const input = 'something random '; - vm.onInput({ untouchedRawReferences: input.trim().split(/\s/), touchedReference: 2 }); - - expect(vm.state.pendingReferences.length).toEqual(2); - expect(vm.state.pendingReferences[0]).toEqual('something'); - expect(vm.state.pendingReferences[1]).toEqual('random'); 
- }); - }); - - describe('onBlur', () => { - beforeEach(() => { - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - - spyOn(vm, 'processAllReferences'); - }); - - it('add any references to pending when blurring', () => { - const input = '#123'; - - vm.onBlur(input); - - expect(vm.processAllReferences).toHaveBeenCalledWith(input); - }); - }); - - describe('processAllReferences', () => { - beforeEach(() => { - vm = new RelatedIssuesRoot({ - propsData: defaultProps, - }).$mount(); - }); - - it('add valid reference to pending', () => { - const input = '#123'; - vm.processAllReferences(input); - - expect(vm.state.pendingReferences.length).toEqual(1); - expect(vm.state.pendingReferences[0]).toEqual('#123'); - }); - - it('add any valid references to pending', () => { - const input = 'asdf #123'; - vm.processAllReferences(input); - - expect(vm.state.pendingReferences.length).toEqual(2); - expect(vm.state.pendingReferences[0]).toEqual('asdf'); - expect(vm.state.pendingReferences[1]).toEqual('#123'); - }); - }); - }); -}); diff --git a/ee/spec/lib/ee/gitlab/import_export/project/tree_restorer_spec.rb b/ee/spec/lib/ee/gitlab/import_export/project/tree_restorer_spec.rb index ed46718b9cabf469d604d264a27b6d03991c6159..79654fb6064a26a1439c69319d137a2bf645f104 100644 --- a/ee/spec/lib/ee/gitlab/import_export/project/tree_restorer_spec.rb +++ b/ee/spec/lib/ee/gitlab/import_export/project/tree_restorer_spec.rb @@ -18,6 +18,7 @@ end context 'with group' do + let(:issue) { project.issues.find_by_title('Issue with Epic') } let!(:project) do create(:project, :builds_disabled, @@ -35,7 +36,8 @@ expect { restored_project_json }.not_to change { Epic.count } expect(project.group.epics.count).to eq(1) - expect(project.issues.find_by_title('Issue with Epic').epic).not_to be_nil + expect(issue.epic).to eq(epic) + expect(issue.epic_issue.relative_position).not_to be_nil end end @@ -45,7 +47,9 @@ expect { restored_project_json }.to change { Epic.count }.from(0).to(1) 
expect(project.group.epics.count).to eq(1) - expect(project.issues.find_by_title('Issue with Epic').epic).not_to be_nil + + expect(issue.epic).not_to be_nil + expect(issue.epic_issue.relative_position).not_to be_nil end end end diff --git a/ee/spec/lib/ee/gitlab/import_export/project/tree_saver_spec.rb b/ee/spec/lib/ee/gitlab/import_export/project/tree_saver_spec.rb index f8e3ddf77147b0de1d1080fafaf5d6dd950511f6..031ad3a206e0eb1a0a26b8296a0b150a30d406ee 100644 --- a/ee/spec/lib/ee/gitlab/import_export/project/tree_saver_spec.rb +++ b/ee/spec/lib/ee/gitlab/import_export/project/tree_saver_spec.rb @@ -60,12 +60,21 @@ end context 'epics' do - it 'has issue epic' do - expect(saved_project_json['issues'].first['epic']).not_to be_empty + it 'has epic_issue' do + expect(saved_project_json['issues'].first['epic_issue']).not_to be_empty + expect(saved_project_json['issues'].first['epic_issue']['id']).to eql(epic_issue.id) end - it 'has issue epic id' do - expect(saved_project_json['issues'].first['epic']['id']).to eql(epic.id) + it 'has epic' do + expect(saved_project_json['issues'].first['epic_issue']['epic']['title']).to eql(epic.title) + end + + it 'does not have epic_id' do + expect(saved_project_json['issues'].first['epic_issue']['epic_id']).to be_nil + end + + it 'does not have issue_id' do + expect(saved_project_json['issues'].first['epic_issue']['issue_id']).to be_nil end end end diff --git a/ee/spec/lib/gitlab/elastic/bulk_indexer_spec.rb b/ee/spec/lib/gitlab/elastic/bulk_indexer_spec.rb new file mode 100644 index 0000000000000000000000000000000000000000..0e3c0b7443b90e3ed08e428a0c15323dd0d7ed9c --- /dev/null +++ b/ee/spec/lib/gitlab/elastic/bulk_indexer_spec.rb @@ -0,0 +1,162 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Gitlab::Elastic::BulkIndexer, :elastic do + let_it_be(:issue) { create(:issue) } + let_it_be(:other_issue) { create(:issue, project: issue.project) } + + let(:project) { issue.project } + + let(:logger) { 
::Gitlab::Elasticsearch::Logger.build } + + subject(:indexer) { described_class.new(logger: logger) } + + let(:es_client) { indexer.client } + + let(:issue_as_ref) { ref(issue) } + let(:issue_as_json_with_times) { issue.__elasticsearch__.as_indexed_json } + let(:issue_as_json) { issue_as_json_with_times.except('created_at', 'updated_at') } + + let(:other_issue_as_ref) { ref(other_issue) } + + describe '#process' do + it 'returns self' do + expect(indexer.process(issue_as_ref)).to be(indexer) + end + + it 'does not send a bulk request per call' do + expect(es_client).not_to receive(:bulk) + + indexer.process(issue_as_ref) + end + + it 'sends a bulk request if the max bulk request size is reached' do + set_bulk_limit(indexer, 1) + + expect(es_client) + .to receive(:bulk) + .with(body: [kind_of(String), kind_of(String)]) + .and_return({}) + + indexer.process(issue_as_ref) + + expect(indexer.failures).to be_empty + end + end + + describe '#flush' do + it 'completes a bulk' do + indexer.process(issue_as_ref) + + expect(es_client) + .to receive(:bulk) + .with(body: [kind_of(String), kind_of(String)]) + .and_return({}) + + expect(indexer.flush).to be_empty + end + + it 'fails documents that elasticsearch refuses to accept' do + # Indexes with uppercase characters are invalid + expect(other_issue_as_ref.database_record.__elasticsearch__) + .to receive(:index_name) + .and_return('Invalid') + + indexer.process(issue_as_ref) + indexer.process(other_issue_as_ref) + + expect(indexer.flush).to contain_exactly(other_issue_as_ref) + expect(indexer.failures).to contain_exactly(other_issue_as_ref) + + refresh_index! 
+ + expect(search_one(Issue)).to have_attributes(issue_as_json) + end + + it 'fails all documents on exception' do + expect(es_client).to receive(:bulk) { raise 'An exception' } + + indexer.process(issue_as_ref) + indexer.process(other_issue_as_ref) + + expect(indexer.flush).to contain_exactly(issue_as_ref, other_issue_as_ref) + expect(indexer.failures).to contain_exactly(issue_as_ref, other_issue_as_ref) + end + + context 'indexing an issue' do + it 'adds the issue to the index' do + expect(indexer.process(issue_as_ref).flush).to be_empty + + refresh_index! + + expect(search_one(Issue)).to have_attributes(issue_as_json) + end + + it 'reindexes an unchanged issue' do + ensure_elasticsearch_index! + + expect(es_client).to receive(:bulk).and_call_original + expect(indexer.process(issue_as_ref).flush).to be_empty + end + + it 'reindexes a changed issue' do + ensure_elasticsearch_index! + issue.update!(title: 'new title') + + expect(issue_as_json['title']).to eq('new title') + expect(indexer.process(issue_as_ref).flush).to be_empty + + refresh_index! + + expect(search_one(Issue)).to have_attributes(issue_as_json) + end + end + + context 'deleting an issue' do + it 'removes the issue from the index' do + ensure_elasticsearch_index! + + expect(issue_as_ref).to receive(:database_record).and_return(nil) + expect(indexer.process(issue_as_ref).flush).to be_empty + + refresh_index! + + expect(search(Issue, '*').size).to eq(0) + end + + it 'succeeds even if the issue is not present' do + expect(issue_as_ref).to receive(:database_record).and_return(nil) + expect(indexer.process(issue_as_ref).flush).to be_empty + + refresh_index! 
+ + expect(search(Issue, '*').size).to eq(0) + end + end + end + + def ref(record) + Gitlab::Elastic::DocumentReference.build(record) + end + + def stub_es_client(indexer, client) + allow(indexer).to receive(:client) { client } + end + + def set_bulk_limit(indexer, bytes) + allow(indexer).to receive(:bulk_limit_bytes) { bytes } + end + + def search(klass, text) + klass.__elasticsearch__.search(text) + end + + def search_one(klass) + results = search(klass, '*') + + expect(results.size).to eq(1) + + results.first._source + end +end diff --git a/ee/spec/lib/gitlab/elastic/document_reference_spec.rb b/ee/spec/lib/gitlab/elastic/document_reference_spec.rb new file mode 100644 index 0000000000000000000000000000000000000000..cc1b6c3c95e2797eefe7015928da97fe6cd4eacc --- /dev/null +++ b/ee/spec/lib/gitlab/elastic/document_reference_spec.rb @@ -0,0 +1,170 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Gitlab::Elastic::DocumentReference do + let_it_be(:issue) { create(:issue) } + let(:project) { issue.project } + + let(:issue_as_array) { [Issue, issue.id, issue.es_id, issue.es_parent] } + let(:issue_as_ref) { described_class.new(*issue_as_array) } + let(:issue_as_str) { issue_as_array.join(' ') } + + let(:project_as_array) { [Project, project.id, project.es_id] } + let(:project_as_ref) { described_class.new(*project_as_array) } + let(:project_as_str) { project_as_array.join(' ') } + + describe '.build' do + it 'builds a document for an issue' do + expect(described_class.build(issue)).to eq(issue_as_ref) + end + + it 'builds a document for a project' do + expect(described_class.build(project)).to eq(project_as_ref) + end + end + + describe '.serialize' do + it 'does nothing to a string' do + expect(described_class.serialize('foo')).to eq('foo') + end + + it 'serializes a DocumentReference' do + expect(described_class.serialize(issue_as_ref)).to eq(issue_as_str) + end + + it 'defers to serialize_record for ApplicationRecord instances' do + 
expect(described_class).to receive(:serialize_record).with(issue) + + described_class.serialize(issue) + end + + it 'defers to serialize_array for Array instances' do + expect(described_class).to receive(:serialize_array).with(issue_as_array) + + described_class.serialize(issue_as_array) + end + + it 'fails to serialize an unrecognised value' do + expect { described_class.serialize(1) }.to raise_error(described_class::InvalidError) + end + end + + describe '.serialize_record' do + it 'serializes an issue' do + expect(described_class.serialize(issue)).to eq(issue_as_str) + end + + it 'serializes a project' do + expect(described_class.serialize(project)).to eq(project_as_str) + end + end + + describe '.serialize_array' do + it 'serializes a project array' do + expect(described_class.serialize(project_as_array)).to eq(project_as_str) + end + + it 'serializes an issue array' do + expect(described_class.serialize(issue_as_array)).to eq(issue_as_str) + end + + it 'fails to serialize a too-small array' do + expect { described_class.serialize(project_as_array[0..1]) }.to raise_error(described_class::InvalidError) + end + + it 'fails to serialize a too-large array' do + expect { described_class.serialize(project_as_array * 2) }.to raise_error(described_class::InvalidError) + end + end + + describe '.deserialize' do + it 'deserializes an issue string' do + expect(described_class.deserialize(issue_as_str)).to eq(issue_as_ref) + end + + it 'deserializes a project string' do + expect(described_class.deserialize(project_as_str)).to eq(project_as_ref) + end + end + + describe '#initialize' do + it 'creates an issue reference' do + expect(described_class.new(*issue_as_array)).to eq(issue_as_ref) + end + + it 'creates a project reference' do + expect(described_class.new(*project_as_array)).to eq(project_as_ref) + end + end + + describe '#==' do + let(:subclass) { Class.new(described_class) } + + it 'is equal to itself' do + expect(issue_as_ref).to eq(issue_as_ref) + end + + it 'is 
equal to another ref when all elements match' do + expect(issue_as_ref).to eq(described_class.new(*issue_as_array)) + end + + it 'is not equal unless the other instance class matches' do + expect(issue_as_ref).not_to eq(subclass.new(*issue_as_array)) + end + + it 'is not equal unless db_id matches' do + other = described_class.new(Issue, issue.id + 1, issue.es_id, issue.es_parent) + + expect(issue_as_ref).not_to eq(other) + end + + it 'is not equal unless es_id matches' do + other = described_class.new(Issue, issue.id, 'Other es_id', issue.es_parent) + + expect(issue_as_ref).not_to eq(other) + end + + it 'is not equal unless es_parent matches' do + other = described_class.new(Issue, issue.id, issue.es_id, 'Other es_parent') + + expect(issue_as_ref).not_to eq(other) + end + end + + describe '#klass_name' do + it { expect(issue_as_ref.klass_name).to eq('Issue') } + end + + describe '#database_record' do + it 'returns an issue' do + expect(issue_as_ref.database_record).to eq(issue) + end + + it 'returns a project' do + expect(project_as_ref.database_record).to eq(project) + end + + it 'returns nil if the record cannot be found' do + ref = described_class.new(Issue, issue.id + 1, 'issue_1') + + expect(ref.database_record).to be_nil + end + + it 'raises if the class is bad' do + ref = described_class.new(Integer, 1, 'integer_1') + + expect { ref.database_record }.to raise_error(NoMethodError) + end + end + + describe '#serialize' do + it 'serializes an issue' do + expect(issue_as_ref.serialize).to eq(issue_as_str) + end + + it 'serializes a project' do + expect(project_as_ref.serialize).to eq(project_as_str) + end + end +end diff --git a/ee/spec/models/concerns/elastic/note_spec.rb b/ee/spec/models/concerns/elastic/note_spec.rb index 5fe5d324f6a476e0f633428a78cd3a059992302e..1766aa6a159cd30f0be3c7d06552fde93782c97a 100644 --- a/ee/spec/models/concerns/elastic/note_spec.rb +++ b/ee/spec/models/concerns/elastic/note_spec.rb @@ -107,6 +107,8 @@ end it "does not create 
ElasticIndexerWorker job for system messages" do + stub_feature_flags(elastic_bulk_incremental_updates: false) + project = create :project, :repository # We have to set one minute delay because of https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/15682 issue = create :issue, project: project, updated_at: 1.minute.ago @@ -116,6 +118,16 @@ create :note, :system, project: project, noteable: issue end + it 'does not track system note updates via the bulk updater' do + stub_feature_flags(elastic_bulk_incremental_updates: true) + + note = create(:note, :system) + + expect(Elastic::ProcessBookkeepingService).not_to receive(:track!) + + note.update!(note: 'some other text here') + end + it 'uses same index for Note subclasses' do Note.subclasses.each do |note_class| expect(note_class.index_name).to eq(Note.index_name) diff --git a/ee/spec/policies/group_policy_spec.rb b/ee/spec/policies/group_policy_spec.rb index 5e4837cde7dacf202469953bfa6469bfacfae1e1..b7c5f9f179a19bd29973e0673804f9b288229f52 100644 --- a/ee/spec/policies/group_policy_spec.rb +++ b/ee/spec/policies/group_policy_spec.rb @@ -48,7 +48,34 @@ stub_licensed_features(contribution_analytics: true) end - it { is_expected.to be_allowed(:read_group_contribution_analytics) } + context 'when signed in user is a member of the group' do + it { is_expected.to be_allowed(:read_group_contribution_analytics) } + end + + describe 'when user is not a member of the group' do + let(:current_user) { non_group_member } + let(:private_group) { create(:group, :private) } + + subject { described_class.new(non_group_member, private_group) } + + context 'when user is not invited to any of the group projects' do + it do + is_expected.not_to be_allowed(:read_group_contribution_analytics) + end + end + + context 'when user is invited to a group project, but not to the group' do + let(:private_project) { create(:project, :private, group: private_group) } + + before do + private_project.add_guest(non_group_member) + end + + it do + 
is_expected.not_to be_allowed(:read_group_contribution_analytics) + end + end + end end context 'when contribution analytics is not available' do diff --git a/ee/spec/services/elastic/index_record_service_spec.rb b/ee/spec/services/elastic/index_record_service_spec.rb index b98ddaba5a64522bc0401e2e87e5ffd1585df0ac..8bf36b730bfc91675f5976de815dddf24e658b9c 100644 --- a/ee/spec/services/elastic/index_record_service_spec.rb +++ b/ee/spec/services/elastic/index_record_service_spec.rb @@ -25,10 +25,10 @@ with_them do it 'indexes new records' do - object = nil - Sidekiq::Testing.disable! do - object = create(type) - end + object = create(type) + + # Prevent records from being added via bulk indexing updates + ::Elastic::ProcessBookkeepingService.clear_tracking! expect do expect(subject.execute(object, true)).to eq(true) @@ -122,10 +122,14 @@ Sidekiq::Testing.inline! do expect(subject.execute(other_project, true)).to eq(true) end + + # Prevent records from being added via bulk indexing updates + ::Elastic::ProcessBookkeepingService.clear_tracking! + ensure_elasticsearch_index! # Only the project itself should be in the index - expect(Elasticsearch::Model.search('*').total_count).to be 1 + expect(Elasticsearch::Model.search('*').total_count).to eq(1) expect(Project.elastic_search('*').records).to contain_exactly(other_project) end @@ -312,13 +316,9 @@ def expect_indexing(issue_ids, response, unstub: false) end it 'skips records for which indexing is disabled' do - project = nil - - Sidekiq::Testing.disable! do - project = create :project, name: 'project_1' - end + stub_ee_application_setting(elasticsearch_limit_indexing: true) - expect(project).to receive(:use_elasticsearch?).and_return(false) + project = create(:project, name: 'project_1') Sidekiq::Testing.inline! 
do expect(subject.execute(project, true)).to eq(true) diff --git a/ee/spec/services/elastic/process_bookkeeping_service_spec.rb b/ee/spec/services/elastic/process_bookkeeping_service_spec.rb new file mode 100644 index 0000000000000000000000000000000000000000..a0c070c51a01065b56c4c0f38ea825eaa5ecf47c --- /dev/null +++ b/ee/spec/services/elastic/process_bookkeeping_service_spec.rb @@ -0,0 +1,140 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Elastic::ProcessBookkeepingService, :clean_gitlab_redis_shared_state do + around do |example| + described_class.with_redis do |redis| + @redis = redis + example.run + end + end + + let(:zset) { 'elastic:incremental:updates:0:zset' } + let(:redis) { @redis } + let(:ref_class) { ::Gitlab::Elastic::DocumentReference } + + let(:fake_refs) { Array.new(10) { |i| ref_class.new(Issue, i, "issue_#{i}", 'project_1') } } + let(:issue) { fake_refs.first } + let(:issue_spec) { issue.serialize } + + describe '.track' do + it 'enqueues a record' do + described_class.track!(issue) + + spec, score = redis.zpopmin(zset) + + expect(spec).to eq(issue_spec) + expect(score).to eq(1.0) + end + + it 'enqueues a set of unique records' do + described_class.track!(*fake_refs) + + expect(described_class.queue_size).to eq(fake_refs.size) + + spec1, score1 = redis.zpopmin(zset) + _, score2 = redis.zpopmin(zset) + + expect(score1).to be < score2 + expect(spec1).to eq(issue_spec) + end + + it 'enqueues 10 identical records as 1 entry' do + described_class.track!(*([issue] * 10)) + + expect(described_class.queue_size).to eq(1) + end + + it 'deduplicates across multiple inserts' do + 10.times { described_class.track!(issue) } + + expect(described_class.queue_size).to eq(1) + end + end + + describe '.queue_size' do + it 'reports the queue size' do + expect(described_class.queue_size).to eq(0) + + described_class.track!(*fake_refs) + + expect(described_class.queue_size).to eq(fake_refs.size) + + expect { redis.zpopmin(zset) }.to 
change(described_class, :queue_size).by(-1) + end + end + + describe '.clear_tracking!' do + it 'removes all entries from the queue' do + described_class.track!(*fake_refs) + + expect(described_class.queue_size).to eq(fake_refs.size) + + described_class.clear_tracking! + + expect(described_class.queue_size).to eq(0) + end + end + + describe '#execute' do + let(:limit) { 5 } + + before do + stub_const('Elastic::ProcessBookkeepingService::LIMIT', limit) + end + + it 'submits a batch of documents' do + described_class.track!(*fake_refs) + + expect(described_class.queue_size).to eq(fake_refs.size) + expect_processing(*fake_refs[0...limit]) + + expect { described_class.new.execute }.to change(described_class, :queue_size).by(-limit) + end + + it 'retries failed documents' do + described_class.track!(*fake_refs) + failed = fake_refs[0] + + expect(described_class.queue_size).to eq(10) + expect_processing(*fake_refs[0...limit], failures: [failed]) + + expect { described_class.new.execute }.to change(described_class, :queue_size).by(-limit + 1) + + serialized, _ = redis.zpopmax(zset) + expect(ref_class.deserialize(serialized)).to eq(failed) + end + + it 'discards malformed documents' do + described_class.track!('Bad') + + expect(described_class.queue_size).to eq(1) + expect_next_instance_of(::Gitlab::Elastic::BulkIndexer) do |indexer| + expect(indexer).not_to receive(:process) + end + + expect { described_class.new.execute }.to change(described_class, :queue_size).by(-1) + end + + it 'fails, preserving documents, when processing fails with an exception' do + described_class.track!(issue) + + expect(described_class.queue_size).to eq(1) + expect_next_instance_of(::Gitlab::Elastic::BulkIndexer) do |indexer| + expect(indexer).to receive(:process).with(issue) { raise 'Bad' } + end + + expect { described_class.new.execute }.to raise_error('Bad') + expect(described_class.queue_size).to eq(1) + end + + def expect_processing(*refs, failures: []) + 
expect_next_instance_of(::Gitlab::Elastic::BulkIndexer) do |indexer| + refs.each { |ref| expect(indexer).to receive(:process).with(ref) } + + expect(indexer).to receive(:flush) { failures } + end + end + end +end diff --git a/ee/spec/support/elastic.rb b/ee/spec/support/elastic.rb index 188244a259ce4b50d27fa296eefdc7d72ab21660..96586855a40ccf8a8c4de415a069e6c000a54803 100644 --- a/ee/spec/support/elastic.rb +++ b/ee/spec/support/elastic.rb @@ -2,11 +2,13 @@ RSpec.configure do |config| config.before(:each, :elastic) do + Elastic::ProcessBookkeepingService.clear_tracking! Gitlab::Elastic::Helper.create_empty_index end config.after(:each, :elastic) do Gitlab::Elastic::Helper.delete_index + Elastic::ProcessBookkeepingService.clear_tracking! end config.include ElasticsearchHelpers, :elastic diff --git a/ee/spec/support/helpers/elasticsearch_helpers.rb b/ee/spec/support/helpers/elasticsearch_helpers.rb index c8b9a46fd58ad3fef0096790fdfabd49a86a71cc..09ce1981032a76dcc1c81ae5ca65b10689f36e9f 100644 --- a/ee/spec/support/helpers/elasticsearch_helpers.rb +++ b/ee/spec/support/helpers/elasticsearch_helpers.rb @@ -2,6 +2,14 @@ module ElasticsearchHelpers def ensure_elasticsearch_index! + # Ensure that any enqueued updates are processed + Elastic::ProcessBookkeepingService.new.execute + + # Make any documents added to the index visible + refresh_index! + end + + def refresh_index! 
::Gitlab::Elastic::Helper.refresh_index end end diff --git a/ee/spec/views/layouts/nav/sidebar/_analytics.html.haml_spec.rb b/ee/spec/views/layouts/nav/sidebar/_analytics.html.haml_spec.rb index 1d630f0600a9bb32b9dd7ea81909c858622c7c7f..ab78fde0524cfc0c4d194b1c8347a4fe0b7a0aa9 100644 --- a/ee/spec/views/layouts/nav/sidebar/_analytics.html.haml_spec.rb +++ b/ee/spec/views/layouts/nav/sidebar/_analytics.html.haml_spec.rb @@ -9,6 +9,7 @@ before do stub_feature_flags(group_level_productivity_analytics: false) + stub_feature_flags(group_level_cycle_analytics: false) end context 'top-level items' do diff --git a/ee/spec/views/layouts/nav/sidebar/_group.html.haml_spec.rb b/ee/spec/views/layouts/nav/sidebar/_group.html.haml_spec.rb index 6017cd2c910143c9f1e4e38bf21ab09b30d5f9bf..fb0d67cd8b38cc54a1188cded296860d6c16683e 100644 --- a/ee/spec/views/layouts/nav/sidebar/_group.html.haml_spec.rb +++ b/ee/spec/views/layouts/nav/sidebar/_group.html.haml_spec.rb @@ -11,23 +11,50 @@ let(:user) { create(:user) } describe 'contribution analytics tab' do - it 'is not visible when there is no valid license and we dont show promotions' do - stub_licensed_features(contribution_analytics: false) + let!(:current_user) { create(:user) } - render + before do + group.add_guest(current_user) - expect(rendered).not_to have_text 'Contribution Analytics' + allow(view).to receive(:current_user).and_return(current_user) end - context 'no license installed' do - let!(:cuser) { create(:admin) } + context 'contribution analytics feature is available' do + before do + stub_licensed_features(contribution_analytics: true) + end + + it 'is visible' do + render + expect(rendered).to have_text 'Contribution Analytics' + end + end + + context 'contribution analytics feature is not available' do + before do + stub_licensed_features(contribution_analytics: false) + end + + context 'we do not show promotions' do + before do + allow(LicenseHelper).to receive(:show_promotions?).and_return(false) + end + + it 'is 
not visible' do + render + + expect(rendered).not_to have_text 'Contribution Analytics' + end + end + end + + context 'no license installed' do before do allow(License).to receive(:current).and_return(nil) stub_application_setting(check_namespace_plan: false) allow(view).to receive(:can?) { |*args| Ability.allowed?(*args) } - allow(view).to receive(:current_user).and_return(cuser) end it 'is visible when there is no valid license but we show promotions' do diff --git a/ee/spec/workers/elastic_index_bulk_cron_worker_spec.rb b/ee/spec/workers/elastic_index_bulk_cron_worker_spec.rb new file mode 100644 index 0000000000000000000000000000000000000000..833970ab6a4b5e100ebd84258fd677c6a872af75 --- /dev/null +++ b/ee/spec/workers/elastic_index_bulk_cron_worker_spec.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe ElasticIndexBulkCronWorker do + include ExclusiveLeaseHelpers + describe '.perform' do + it 'executes the service under an exclusive lease' do + expect_to_obtain_exclusive_lease('elastic_index_bulk_cron_worker') + + expect_next_instance_of(::Elastic::ProcessBookkeepingService) do |service| + expect(service).to receive(:execute) + end + + described_class.new.perform + end + end +end diff --git a/lib/gitlab/database/migration_helpers.rb b/lib/gitlab/database/migration_helpers.rb index f75e943671b13756ddae75dff0c887d7592cef13..82a8450895933389e32568a96c60d08686f21ec1 100644 --- a/lib/gitlab/database/migration_helpers.rb +++ b/lib/gitlab/database/migration_helpers.rb @@ -215,7 +215,7 @@ def validate_foreign_key(source, column, name: nil) fk_name = name || concurrent_foreign_key_name(source, column) unless foreign_key_exists?(source, name: fk_name) - raise "cannot find #{fk_name} on #{source} table" + raise missing_schema_object_message(source, "foreign key", fk_name) end disable_statement_timeout do @@ -931,7 +931,10 @@ def copy_foreign_keys(table, old, new) def column_for(table, name) name = name.to_s - columns(table).find { 
|column| column.name == name } + column = columns(table).find { |column| column.name == name } + raise(missing_schema_object_message(table, "column", name)) if column.nil? + + column end # This will replace the first occurrence of a string in a column with @@ -1166,6 +1169,18 @@ def bulk_migrate_async(*args) private + def missing_schema_object_message(table, type, name) + <<~MESSAGE + Could not find #{type} "#{name}" on table "#{table}" which was referenced during the migration. + This issue could be caused by the database schema straying from the expected state. + + To resolve this issue, please verify: + 1. all previous migrations have completed + 2. the database objects used in this migration match the Rails definition in schema.rb or structure.sql + + MESSAGE + end + def tables_match?(target_table, foreign_key_table) target_table.blank? || foreign_key_table == target_table end diff --git a/lib/gitlab/import_export/project/import_export.yml b/lib/gitlab/import_export/project/import_export.yml index fd83a275e0d65f77737a29dd8235badf08299a5a..645b0540f614596fbce24e0ccde51d13f57bc2a2 100644 --- a/lib/gitlab/import_export/project/import_export.yml +++ b/lib/gitlab/import_export/project/import_export.yml @@ -319,6 +319,9 @@ excluded_attributes: - :state_id - :start_date_sourcing_epic_id - :due_date_sourcing_epic_id + epic_issue: + - :epic_id + - :issue_id methods: notes: - :type @@ -371,7 +374,8 @@ ee: - design_versions: - actions: - :design # Duplicate export of issues.designs in order to link the record to both Issue and Action - - :epic + - epic_issue: + - :epic - protected_branches: - :unprotect_access_levels - protected_environments: diff --git a/lib/gitlab/sidekiq_middleware.rb b/lib/gitlab/sidekiq_middleware.rb index c3a52a1986dd1793fc8ed0a795c15efef06b098d..37165d787c7e3d81b3c93f906a2c268a1a231ea5 100644 --- a/lib/gitlab/sidekiq_middleware.rb +++ b/lib/gitlab/sidekiq_middleware.rb @@ -9,18 +9,18 @@ module SidekiqMiddleware # eg: 
`config.server_middleware(&Gitlab::SidekiqMiddleware.server_configurator)` def self.server_configurator(metrics: true, arguments_logger: true, memory_killer: true, request_store: true) lambda do |chain| - chain.add Gitlab::SidekiqMiddleware::Monitor - chain.add Gitlab::SidekiqMiddleware::ServerMetrics if metrics - chain.add Gitlab::SidekiqMiddleware::ArgumentsLogger if arguments_logger - chain.add Gitlab::SidekiqMiddleware::MemoryKiller if memory_killer - chain.add Gitlab::SidekiqMiddleware::RequestStoreMiddleware if request_store - chain.add Gitlab::SidekiqMiddleware::BatchLoader - chain.add Labkit::Middleware::Sidekiq::Server - chain.add Gitlab::SidekiqMiddleware::InstrumentationLogger - chain.add Gitlab::SidekiqMiddleware::AdminMode::Server - chain.add Gitlab::SidekiqStatus::ServerMiddleware - chain.add Gitlab::SidekiqMiddleware::WorkerContext::Server - chain.add Gitlab::SidekiqMiddleware::DuplicateJobs::Server + chain.add ::Gitlab::SidekiqMiddleware::Monitor + chain.add ::Gitlab::SidekiqMiddleware::ServerMetrics if metrics + chain.add ::Gitlab::SidekiqMiddleware::ArgumentsLogger if arguments_logger + chain.add ::Gitlab::SidekiqMiddleware::MemoryKiller if memory_killer + chain.add ::Gitlab::SidekiqMiddleware::RequestStoreMiddleware if request_store + chain.add ::Gitlab::SidekiqMiddleware::BatchLoader + chain.add ::Labkit::Middleware::Sidekiq::Server + chain.add ::Gitlab::SidekiqMiddleware::InstrumentationLogger + chain.add ::Gitlab::SidekiqMiddleware::AdminMode::Server + chain.add ::Gitlab::SidekiqStatus::ServerMiddleware + chain.add ::Gitlab::SidekiqMiddleware::WorkerContext::Server + chain.add ::Gitlab::SidekiqMiddleware::DuplicateJobs::Server end end @@ -29,12 +29,12 @@ def self.server_configurator(metrics: true, arguments_logger: true, memory_kille # eg: `config.client_middleware(&Gitlab::SidekiqMiddleware.client_configurator)` def self.client_configurator lambda do |chain| - chain.add Gitlab::SidekiqStatus::ClientMiddleware - chain.add 
Gitlab::SidekiqMiddleware::ClientMetrics - chain.add Gitlab::SidekiqMiddleware::WorkerContext::Client # needs to be before the Labkit middleware - chain.add Labkit::Middleware::Sidekiq::Client - chain.add Gitlab::SidekiqMiddleware::AdminMode::Client - chain.add Gitlab::SidekiqMiddleware::DuplicateJobs::Client + chain.add ::Gitlab::SidekiqStatus::ClientMiddleware + chain.add ::Gitlab::SidekiqMiddleware::ClientMetrics + chain.add ::Gitlab::SidekiqMiddleware::WorkerContext::Client # needs to be before the Labkit middleware + chain.add ::Labkit::Middleware::Sidekiq::Client + chain.add ::Gitlab::SidekiqMiddleware::AdminMode::Client + chain.add ::Gitlab::SidekiqMiddleware::DuplicateJobs::Client end end end diff --git a/spec/features/groups/navbar_spec.rb b/spec/features/groups/navbar_spec.rb index 8c16dcec42f169089898aafc080e1e9452b33965..4d45bcf639daf8fdb530a8e0b2624e64b5f068a4 100644 --- a/spec/features/groups/navbar_spec.rb +++ b/spec/features/groups/navbar_spec.rb @@ -73,5 +73,20 @@ it_behaves_like 'verified navigation bar' end + + context 'when value stream analytics is available' do + before do + stub_licensed_features(cycle_analytics_for_groups: true) + + analytics_nav_item[:nav_sub_items] << _('Value Stream Analytics') + + group.add_maintainer(user) + sign_in(user) + + visit group_path(group) + end + + it_behaves_like 'verified navigation bar' + end end end diff --git a/spec/fixtures/lib/gitlab/import_export/group/project.json b/spec/fixtures/lib/gitlab/import_export/group/project.json index ce4fa1981ff4496956de0a02e6a502ba3be94ea0..e8e1e53a86ad464ace49291d61342b0b8a7d321d 100644 --- a/spec/fixtures/lib/gitlab/import_export/group/project.json +++ b/spec/fixtures/lib/gitlab/import_export/group/project.json @@ -205,36 +205,40 @@ "iid": 1, "group_id": 100 }, - "epic": { - "id": 1, - "group_id": 5, - "author_id": 1, - "assignee_id": null, - "iid": 1, - "updated_by_id": null, - "last_edited_by_id": null, - "lock_version": 0, - "start_date": null, - "end_date": null, - 
"last_edited_at": null, - "created_at": "2019-12-08T19:37:07.098Z", - "updated_at": "2019-12-08T19:43:11.568Z", - "title": "An epic", - "description": null, - "start_date_sourcing_milestone_id": null, - "due_date_sourcing_milestone_id": null, - "start_date_fixed": null, - "due_date_fixed": null, - "start_date_is_fixed": null, - "due_date_is_fixed": null, - "closed_by_id": null, - "closed_at": null, - "parent_id": null, - "relative_position": null, - "state_id": "opened", - "start_date_sourcing_epic_id": null, - "due_date_sourcing_epic_id": null, - "milestone_id": null + "epic_issue": { + "id": 78, + "relative_position": 1073740323, + "epic": { + "id": 1, + "group_id": 5, + "author_id": 1, + "assignee_id": null, + "iid": 1, + "updated_by_id": null, + "last_edited_by_id": null, + "lock_version": 0, + "start_date": null, + "end_date": null, + "last_edited_at": null, + "created_at": "2019-12-08T19:37:07.098Z", + "updated_at": "2019-12-08T19:43:11.568Z", + "title": "An epic", + "description": null, + "start_date_sourcing_milestone_id": null, + "due_date_sourcing_milestone_id": null, + "start_date_fixed": null, + "due_date_fixed": null, + "start_date_is_fixed": null, + "due_date_is_fixed": null, + "closed_by_id": null, + "closed_at": null, + "parent_id": null, + "relative_position": null, + "state_id": "opened", + "start_date_sourcing_epic_id": null, + "due_date_sourcing_epic_id": null, + "milestone_id": null + } } } ], diff --git a/spec/javascripts/blob_edit/blob_bundle_spec.js b/spec/frontend/blob_edit/blob_bundle_spec.js similarity index 93% rename from spec/javascripts/blob_edit/blob_bundle_spec.js rename to spec/frontend/blob_edit/blob_bundle_spec.js index 06c6a603155403d6d913d484b0251c88e7179991..be438781850db0ae4ef6a03940407694257a2e8c 100644 --- a/spec/javascripts/blob_edit/blob_bundle_spec.js +++ b/spec/frontend/blob_edit/blob_bundle_spec.js @@ -1,9 +1,10 @@ import $ from 'jquery'; import blobBundle from '~/blob_edit/blob_bundle'; 
+jest.mock('~/blob_edit/edit_blob'); + describe('BlobBundle', () => { beforeEach(() => { - spyOnDependency(blobBundle, 'EditBlob').and.stub(); setFixtures(` <div class="js-edit-blob-form" data-blob-filename="blah"> <button class="js-commit-button"></button> diff --git a/spec/frontend/boards/components/issue_card_inner_scoped_label_spec.js b/spec/frontend/boards/components/issue_card_inner_scoped_label_spec.js index 7389cb14ecb3600912fb18fac2a5d1e053a3d6c1..53e670e76da5bb5cf6381c9e2707c7310c6fa134 100644 --- a/spec/frontend/boards/components/issue_card_inner_scoped_label_spec.js +++ b/spec/frontend/boards/components/issue_card_inner_scoped_label_spec.js @@ -1,43 +1,40 @@ -import Vue from 'vue'; -import mountComponent from 'helpers/vue_mount_component_helper'; +import { GlLink } from '@gitlab/ui'; +import { shallowMount } from '@vue/test-utils'; import IssueCardInnerScopedLabel from '~/boards/components/issue_card_inner_scoped_label.vue'; describe('IssueCardInnerScopedLabel Component', () => { - let vm; - const Component = Vue.extend(IssueCardInnerScopedLabel); - const props = { - label: { title: 'Foo::Bar', description: 'Some Random Description' }, - labelStyle: { background: 'white', color: 'black' }, - scopedLabelsDocumentationLink: '/docs-link', - }; - const createComponent = () => mountComponent(Component, { ...props }); + let wrapper; beforeEach(() => { - vm = createComponent(); + wrapper = shallowMount(IssueCardInnerScopedLabel, { + propsData: { + label: { title: 'Foo::Bar', description: 'Some Random Description' }, + labelStyle: { background: 'white', color: 'black' }, + scopedLabelsDocumentationLink: '/docs-link', + }, + }); }); afterEach(() => { - vm.$destroy(); + wrapper.destroy(); }); it('should render label title', () => { - expect(vm.$el.querySelector('.color-label').textContent.trim()).toEqual('Foo::Bar'); + expect(wrapper.find('.color-label').text()).toBe('Foo::Bar'); }); it('should render question mark symbol', () => { - 
expect(vm.$el.querySelector('.fa-question-circle')).not.toBeNull(); + expect(wrapper.find('.fa-question-circle').exists()).toBe(true); }); it('should render label style provided', () => { - const node = vm.$el.querySelector('.color-label'); + const label = wrapper.find('.color-label'); - expect(node.style.background).toEqual(props.labelStyle.background); - expect(node.style.color).toEqual(props.labelStyle.color); + expect(label.attributes('style')).toContain('background: white;'); + expect(label.attributes('style')).toContain('color: black;'); }); it('should render the docs link', () => { - expect(vm.$el.querySelector('a.scoped-label').href).toContain( - props.scopedLabelsDocumentationLink, - ); + expect(wrapper.find(GlLink).attributes('href')).toBe('/docs-link'); }); }); diff --git a/spec/frontend/boards/components/issue_due_date_spec.js b/spec/frontend/boards/components/issue_due_date_spec.js index 8cb1d963851450e0eed06319b9c81f5df816a1e2..880859287e13a9c86191751aba1c2f5dfcee987c 100644 --- a/spec/frontend/boards/components/issue_due_date_spec.js +++ b/spec/frontend/boards/components/issue_due_date_spec.js @@ -1,72 +1,78 @@ -import Vue from 'vue'; +import { shallowMount } from '@vue/test-utils'; import dateFormat from 'dateformat'; import IssueDueDate from '~/boards/components/issue_due_date.vue'; -import mountComponent from '../../helpers/vue_mount_component_helper'; + +const createComponent = (dueDate = new Date(), closed = false) => + shallowMount(IssueDueDate, { + propsData: { + closed, + date: dateFormat(dueDate, 'yyyy-mm-dd', true), + }, + }); + +const findTime = wrapper => wrapper.find('time'); describe('Issue Due Date component', () => { - let vm; + let wrapper; let date; - const Component = Vue.extend(IssueDueDate); - const createComponent = (dueDate = new Date(), closed = false) => - mountComponent(Component, { closed, date: dateFormat(dueDate, 'yyyy-mm-dd', true) }); beforeEach(() => { date = new Date(); - vm = createComponent(); }); afterEach(() => { 
- vm.$destroy(); + wrapper.destroy(); }); it('should render "Today" if the due date is today', () => { - const timeContainer = vm.$el.querySelector('time'); + wrapper = createComponent(); - expect(timeContainer.textContent.trim()).toEqual('Today'); + expect(findTime(wrapper).text()).toBe('Today'); }); it('should render "Yesterday" if the due date is yesterday', () => { date.setDate(date.getDate() - 1); - vm = createComponent(date); + wrapper = createComponent(date); - expect(vm.$el.querySelector('time').textContent.trim()).toEqual('Yesterday'); + expect(findTime(wrapper).text()).toBe('Yesterday'); }); it('should render "Tomorrow" if the due date is one day from now', () => { date.setDate(date.getDate() + 1); - vm = createComponent(date); + wrapper = createComponent(date); - expect(vm.$el.querySelector('time').textContent.trim()).toEqual('Tomorrow'); + expect(findTime(wrapper).text()).toBe('Tomorrow'); }); it('should render day of the week if due date is one week away', () => { date.setDate(date.getDate() + 5); - vm = createComponent(date); + wrapper = createComponent(date); - expect(vm.$el.querySelector('time').textContent.trim()).toEqual(dateFormat(date, 'dddd')); + expect(findTime(wrapper).text()).toBe(dateFormat(date, 'dddd')); }); it('should render month and day for other dates', () => { date.setDate(date.getDate() + 17); - vm = createComponent(date); + wrapper = createComponent(date); const today = new Date(); const isDueInCurrentYear = today.getFullYear() === date.getFullYear(); const format = isDueInCurrentYear ? 
'mmm d' : 'mmm d, yyyy'; - expect(vm.$el.querySelector('time').textContent.trim()).toEqual(dateFormat(date, format)); + expect(findTime(wrapper).text()).toBe(dateFormat(date, format)); }); it('should contain the correct `.text-danger` css class for overdue issue that is open', () => { date.setDate(date.getDate() - 17); - vm = createComponent(date); + wrapper = createComponent(date); - expect(vm.$el.querySelector('time').classList.contains('text-danger')).toEqual(true); + expect(findTime(wrapper).classes('text-danger')).toBe(true); }); it('should not contain the `.text-danger` css class for overdue issue that is closed', () => { date.setDate(date.getDate() - 17); - vm = createComponent(date, true); + const closed = true; + wrapper = createComponent(date, closed); - expect(vm.$el.querySelector('time').classList.contains('text-danger')).toEqual(false); + expect(findTime(wrapper).classes('text-danger')).toBe(false); }); }); diff --git a/spec/frontend/diffs/components/diff_table_cell_spec.js b/spec/frontend/diffs/components/diff_table_cell_spec.js index 1af0746f3bd6eb9d9dc278822b90b69c9652b888..ad70b5695cc80eb34644592f4d7bda99d800bda7 100644 --- a/spec/frontend/diffs/components/diff_table_cell_spec.js +++ b/spec/frontend/diffs/components/diff_table_cell_spec.js @@ -155,6 +155,10 @@ describe('DiffTableCell', () => { }); }); + it('renders the correct line number', () => { + expect(findLineNumber().text()).toEqual(TEST_LINE_NUMBER.toString()); + }); + it('on click, dispatches setHighlightedRow', () => { expect(store.dispatch).not.toHaveBeenCalled(); diff --git a/spec/frontend/notes/stores/getters_spec.js b/spec/frontend/notes/stores/getters_spec.js index 83417bd70efa32540c31d57b08242839717cdc82..1ade94641288fe334ad06c24d4fb8bc8b4f378ef 100644 --- a/spec/frontend/notes/stores/getters_spec.js +++ b/spec/frontend/notes/stores/getters_spec.js @@ -35,6 +35,7 @@ describe('Getters Notes Store', () => { notesData: notesDataMock, userData: userDataMock, noteableData: 
noteableDataMock, + descriptionVersion: 'descriptionVersion', }; }); @@ -385,4 +386,10 @@ describe('Getters Notes Store', () => { expect(getters.getDiscussion(state)('1')).toEqual({ id: '1' }); }); }); + + describe('descriptionVersion', () => { + it('should return `descriptionVersion`', () => { + expect(getters.descriptionVersion(state)).toEqual('descriptionVersion'); + }); + }); }); diff --git a/spec/frontend/vue_shared/components/issue/related_issuable_mock_data.js b/spec/frontend/vue_shared/components/issue/related_issuable_mock_data.js new file mode 100644 index 0000000000000000000000000000000000000000..5f69d761fdf88137836e0b0bc7b7d63380c96ed9 --- /dev/null +++ b/spec/frontend/vue_shared/components/issue/related_issuable_mock_data.js @@ -0,0 +1,121 @@ +export const defaultProps = { + endpoint: '/foo/bar/issues/1/related_issues', + currentNamespacePath: 'foo', + currentProjectPath: 'bar', +}; + +export const issuable1 = { + id: 200, + epicIssueId: 1, + confidential: false, + reference: 'foo/bar#123', + displayReference: '#123', + title: 'some title', + path: '/foo/bar/issues/123', + relationPath: '/foo/bar/issues/123/relation', + state: 'opened', + linkType: 'relates_to', + dueDate: '2010-11-22', + weight: 5, +}; + +export const issuable2 = { + id: 201, + epicIssueId: 2, + confidential: false, + reference: 'foo/bar#124', + displayReference: '#124', + title: 'some other thing', + path: '/foo/bar/issues/124', + relationPath: '/foo/bar/issues/124/relation', + state: 'opened', + linkType: 'blocks', +}; + +export const issuable3 = { + id: 202, + epicIssueId: 3, + confidential: false, + reference: 'foo/bar#125', + displayReference: '#125', + title: 'some other other thing', + path: '/foo/bar/issues/125', + relationPath: '/foo/bar/issues/125/relation', + state: 'opened', + linkType: 'is_blocked_by', +}; + +export const issuable4 = { + id: 203, + epicIssueId: 4, + confidential: false, + reference: 'foo/bar#126', + displayReference: '#126', + title: 'some other other 
other thing', + path: '/foo/bar/issues/126', + relationPath: '/foo/bar/issues/126/relation', + state: 'opened', +}; + +export const issuable5 = { + id: 204, + epicIssueId: 5, + confidential: false, + reference: 'foo/bar#127', + displayReference: '#127', + title: 'some other other other thing', + path: '/foo/bar/issues/127', + relationPath: '/foo/bar/issues/127/relation', + state: 'opened', +}; + +export const defaultMilestone = { + id: 1, + state: 'active', + title: 'Milestone title', + start_date: '2018-01-01', + due_date: '2019-12-31', +}; + +export const defaultAssignees = [ + { + id: 1, + name: 'Administrator', + username: 'root', + state: 'active', + avatar_url: `${gl.TEST_HOST}`, + web_url: `${gl.TEST_HOST}/root`, + status_tooltip_html: null, + path: '/root', + }, + { + id: 13, + name: 'Brooks Beatty', + username: 'brynn_champlin', + state: 'active', + avatar_url: `${gl.TEST_HOST}`, + web_url: `${gl.TEST_HOST}/brynn_champlin`, + status_tooltip_html: null, + path: '/brynn_champlin', + }, + { + id: 6, + name: 'Bryce Turcotte', + username: 'melynda', + state: 'active', + avatar_url: `${gl.TEST_HOST}`, + web_url: `${gl.TEST_HOST}/melynda`, + status_tooltip_html: null, + path: '/melynda', + }, + { + id: 20, + name: 'Conchita Eichmann', + username: 'juliana_gulgowski', + state: 'active', + avatar_url: `${gl.TEST_HOST}`, + web_url: `${gl.TEST_HOST}/juliana_gulgowski`, + status_tooltip_html: null, + path: '/juliana_gulgowski', + }, +]; diff --git a/spec/javascripts/vue_shared/components/content_viewer/content_viewer_spec.js b/spec/javascripts/vue_shared/components/content_viewer/content_viewer_spec.js index e2a1ed931f1e4b4d8e9e186dbfc19b45ef17c23b..fbe9337ecf4277c23bf5cd20b10e803b9677c0e3 100644 --- a/spec/javascripts/vue_shared/components/content_viewer/content_viewer_spec.js +++ b/spec/javascripts/vue_shared/components/content_viewer/content_viewer_spec.js @@ -1,6 +1,7 @@ import Vue from 'vue'; import MockAdapter from 'axios-mock-adapter'; import mountComponent 
from 'spec/helpers/vue_mount_component_helper'; +import waitForPromises from 'spec/helpers/wait_for_promises'; import { GREEN_BOX_IMAGE_URL } from 'spec/test_constants'; import axios from '~/lib/utils/axios_utils'; import contentViewer from '~/vue_shared/components/content_viewer/content_viewer.vue'; @@ -22,7 +23,7 @@ describe('ContentViewer', () => { it('markdown preview renders + loads rendered markdown from server', done => { mock = new MockAdapter(axios); - mock.onPost(`${gon.relative_url_root}/testproject/preview_markdown`).reply(200, { + mock.onPost(`${gon.relative_url_root}/testproject/preview_markdown`).replyOnce(200, { body: '<b>testing</b>', }); @@ -33,13 +34,12 @@ describe('ContentViewer', () => { type: 'markdown', }); - const previewContainer = vm.$el.querySelector('.md-previewer'); - - setTimeout(() => { - expect(previewContainer.textContent).toContain('testing'); - - done(); - }); + waitForPromises() + .then(() => { + expect(vm.$el.querySelector('.md-previewer').textContent).toContain('testing'); + }) + .then(done) + .catch(done.fail); }); it('renders image preview', done => { @@ -49,11 +49,12 @@ describe('ContentViewer', () => { type: 'image', }); - setTimeout(() => { - expect(vm.$el.querySelector('img').getAttribute('src')).toBe(GREEN_BOX_IMAGE_URL); - - done(); - }); + vm.$nextTick() + .then(() => { + expect(vm.$el.querySelector('img').getAttribute('src')).toBe(GREEN_BOX_IMAGE_URL); + }) + .then(done) + .catch(done.fail); }); it('renders fallback download control', done => { @@ -62,18 +63,19 @@ describe('ContentViewer', () => { fileSize: 1024, }); - setTimeout(() => { - expect( - vm.$el - .querySelector('.file-info') - .textContent.trim() - .replace(/\s+/, ' '), - ).toEqual('test.abc (1.00 KiB)'); - - expect(vm.$el.querySelector('.btn.btn-default').textContent.trim()).toEqual('Download'); - - done(); - }); + vm.$nextTick() + .then(() => { + expect( + vm.$el + .querySelector('.file-info') + .textContent.trim() + .replace(/\s+/, ' '), + 
).toEqual('test.abc (1.00 KiB)'); + + expect(vm.$el.querySelector('.btn.btn-default').textContent.trim()).toEqual('Download'); + }) + .then(done) + .catch(done.fail); }); it('renders fallback download control for file with a data URL path properly', done => { @@ -82,13 +84,14 @@ describe('ContentViewer', () => { filePath: 'somepath/test.abc', }); - setTimeout(() => { - expect(vm.$el.querySelector('.file-info').textContent.trim()).toEqual('test.abc'); - expect(vm.$el.querySelector('.btn.btn-default')).toHaveAttr('download', 'test.abc'); - expect(vm.$el.querySelector('.btn.btn-default').textContent.trim()).toEqual('Download'); - - done(); - }); + vm.$nextTick() + .then(() => { + expect(vm.$el.querySelector('.file-info').textContent.trim()).toEqual('test.abc'); + expect(vm.$el.querySelector('.btn.btn-default')).toHaveAttr('download', 'test.abc'); + expect(vm.$el.querySelector('.btn.btn-default').textContent.trim()).toEqual('Download'); + }) + .then(done) + .catch(done.fail); }); it('markdown preview receives the file path as a parameter', done => { @@ -106,14 +109,15 @@ describe('ContentViewer', () => { filePath: 'foo/test.md', }); - setTimeout(() => { - expect(axios.post).toHaveBeenCalledWith( - `${gon.relative_url_root}/testproject/preview_markdown`, - { path: 'foo/test.md', text: '* Test' }, - jasmine.any(Object), - ); - - done(); - }); + vm.$nextTick() + .then(() => { + expect(axios.post).toHaveBeenCalledWith( + `${gon.relative_url_root}/testproject/preview_markdown`, + { path: 'foo/test.md', text: '* Test' }, + jasmine.any(Object), + ); + }) + .then(done) + .catch(done.fail); }); }); diff --git a/spec/javascripts/vue_shared/components/issue/related_issuable_mock_data.js b/spec/javascripts/vue_shared/components/issue/related_issuable_mock_data.js index d3dc169ddabb6ff429a9f7cd10853eef0c18c893..3c42f0c2aa976860b674b36b8453701dffb268b2 100644 --- a/spec/javascripts/vue_shared/components/issue/related_issuable_mock_data.js +++ 
b/spec/javascripts/vue_shared/components/issue/related_issuable_mock_data.js @@ -1,116 +1 @@ -export const defaultProps = { - endpoint: '/foo/bar/issues/1/related_issues', - currentNamespacePath: 'foo', - currentProjectPath: 'bar', -}; - -export const issuable1 = { - id: 200, - epicIssueId: 1, - confidential: false, - reference: 'foo/bar#123', - displayReference: '#123', - title: 'some title', - path: '/foo/bar/issues/123', - state: 'opened', - linkType: 'relates_to', - dueDate: '2010-11-22', - weight: 5, -}; - -export const issuable2 = { - id: 201, - epicIssueId: 2, - confidential: false, - reference: 'foo/bar#124', - displayReference: '#124', - title: 'some other thing', - path: '/foo/bar/issues/124', - state: 'opened', - linkType: 'blocks', -}; - -export const issuable3 = { - id: 202, - epicIssueId: 3, - confidential: false, - reference: 'foo/bar#125', - displayReference: '#125', - title: 'some other other thing', - path: '/foo/bar/issues/125', - state: 'opened', - linkType: 'is_blocked_by', -}; - -export const issuable4 = { - id: 203, - epicIssueId: 4, - confidential: false, - reference: 'foo/bar#126', - displayReference: '#126', - title: 'some other other other thing', - path: '/foo/bar/issues/126', - state: 'opened', -}; - -export const issuable5 = { - id: 204, - epicIssueId: 5, - confidential: false, - reference: 'foo/bar#127', - displayReference: '#127', - title: 'some other other other thing', - path: '/foo/bar/issues/127', - state: 'opened', -}; - -export const defaultMilestone = { - id: 1, - state: 'active', - title: 'Milestone title', - start_date: '2018-01-01', - due_date: '2019-12-31', -}; - -export const defaultAssignees = [ - { - id: 1, - name: 'Administrator', - username: 'root', - state: 'active', - avatar_url: `${gl.TEST_HOST}`, - web_url: `${gl.TEST_HOST}/root`, - status_tooltip_html: null, - path: '/root', - }, - { - id: 13, - name: 'Brooks Beatty', - username: 'brynn_champlin', - state: 'active', - avatar_url: `${gl.TEST_HOST}`, - web_url: 
`${gl.TEST_HOST}/brynn_champlin`, - status_tooltip_html: null, - path: '/brynn_champlin', - }, - { - id: 6, - name: 'Bryce Turcotte', - username: 'melynda', - state: 'active', - avatar_url: `${gl.TEST_HOST}`, - web_url: `${gl.TEST_HOST}/melynda`, - status_tooltip_html: null, - path: '/melynda', - }, - { - id: 20, - name: 'Conchita Eichmann', - username: 'juliana_gulgowski', - state: 'active', - avatar_url: `${gl.TEST_HOST}`, - web_url: `${gl.TEST_HOST}/juliana_gulgowski`, - status_tooltip_html: null, - path: '/juliana_gulgowski', - }, -]; +export * from '../../../../frontend/vue_shared/components/issue/related_issuable_mock_data'; diff --git a/spec/lib/gitlab/database/migration_helpers_spec.rb b/spec/lib/gitlab/database/migration_helpers_spec.rb index ce6e8c731e2927a6e816d001dee82d39c0fb47b0..1fd6157ce43c773c5a4c7b133c7bcfc3efc41259 100644 --- a/spec/lib/gitlab/database/migration_helpers_spec.rb +++ b/spec/lib/gitlab/database/migration_helpers_spec.rb @@ -383,7 +383,8 @@ it 'raises an error' do expect(model).to receive(:foreign_key_exists?).and_return(false) - expect { model.validate_foreign_key(:projects, :user_id) }.to raise_error(/cannot find/) + error_message = /Could not find foreign key "fk_name" on table "projects"/ + expect { model.validate_foreign_key(:projects, :user_id, name: :fk_name) }.to raise_error(error_message) end end end @@ -587,6 +588,8 @@ end describe '#add_column_with_default' do + let(:column) { Project.columns.find { |c| c.name == "id" } } + context 'outside of a transaction' do context 'when a column limit is not set' do before do @@ -601,6 +604,9 @@ expect(model).to receive(:change_column_default) .with(:projects, :foo, 10) + + expect(model).to receive(:column_for) + .with(:projects, :foo).and_return(column) end it 'adds the column while allowing NULL values' do @@ -655,6 +661,7 @@ it 'adds the column with a limit' do allow(model).to receive(:transaction_open?).and_return(false) allow(model).to receive(:transaction).and_yield + 
allow(model).to receive(:column_for).with(:projects, :foo).and_return(column) allow(model).to receive(:update_column_in_batches).with(:projects, :foo, 10) allow(model).to receive(:change_column_null).with(:projects, :foo, false) allow(model).to receive(:change_column_default).with(:projects, :foo, 10) @@ -721,50 +728,68 @@ before do allow(model).to receive(:transaction_open?).and_return(false) - allow(model).to receive(:column_for).and_return(old_column) end - it 'renames a column concurrently' do - expect(model).to receive(:check_trigger_permissions!).with(:users) + context 'when the column to rename exists' do + before do + allow(model).to receive(:column_for).and_return(old_column) + end - expect(model).to receive(:install_rename_triggers_for_postgresql) - .with(trigger_name, '"users"', '"old"', '"new"') + it 'renames a column concurrently' do + expect(model).to receive(:check_trigger_permissions!).with(:users) - expect(model).to receive(:add_column) - .with(:users, :new, :integer, - limit: old_column.limit, - precision: old_column.precision, - scale: old_column.scale) + expect(model).to receive(:install_rename_triggers_for_postgresql) + .with(trigger_name, '"users"', '"old"', '"new"') - expect(model).to receive(:change_column_default) - .with(:users, :new, old_column.default) + expect(model).to receive(:add_column) + .with(:users, :new, :integer, + limit: old_column.limit, + precision: old_column.precision, + scale: old_column.scale) - expect(model).to receive(:update_column_in_batches) + expect(model).to receive(:change_column_default) + .with(:users, :new, old_column.default) - expect(model).to receive(:change_column_null).with(:users, :new, false) + expect(model).to receive(:update_column_in_batches) - expect(model).to receive(:copy_indexes).with(:users, :old, :new) - expect(model).to receive(:copy_foreign_keys).with(:users, :old, :new) + expect(model).to receive(:change_column_null).with(:users, :new, false) + + expect(model).to 
receive(:copy_indexes).with(:users, :old, :new) + expect(model).to receive(:copy_foreign_keys).with(:users, :old, :new) + + model.rename_column_concurrently(:users, :old, :new) + end - model.rename_column_concurrently(:users, :old, :new) + context 'when default is false' do + let(:old_column) do + double(:column, + type: :boolean, + limit: nil, + default: false, + null: false, + precision: nil, + scale: nil) + end + + it 'copies the default to the new column' do + expect(model).to receive(:change_column_default) + .with(:users, :new, old_column.default) + + model.rename_column_concurrently(:users, :old, :new) + end + end end - context 'when default is false' do - let(:old_column) do - double(:column, - type: :boolean, - limit: nil, - default: false, - null: false, - precision: nil, - scale: nil) + context 'when the column to be renamed does not exist' do + before do + allow(model).to receive(:columns).and_return([]) end - it 'copies the default to the new column' do - expect(model).to receive(:change_column_default) - .with(:users, :new, old_column.default) + it 'raises an error with appropriate message' do + expect(model).to receive(:check_trigger_permissions!).with(:users) - model.rename_column_concurrently(:users, :old, :new) + error_message = /Could not find column "missing_column" on table "users"/ + expect { model.rename_column_concurrently(:users, :missing_column, :new) }.to raise_error(error_message) end end end @@ -1133,8 +1158,9 @@ expect(column.name).to eq('id') end - it 'returns nil when a column does not exist' do - expect(model.column_for(:users, :kittens)).to be_nil + it 'raises an error when a column does not exist' do + error_message = /Could not find column "kittens" on table "users"/ + expect { model.column_for(:users, :kittens) }.to raise_error(error_message) end end diff --git a/spec/lib/gitlab/import_export/all_models.yml b/spec/lib/gitlab/import_export/all_models.yml index 
f6a3ade7f1804a9252274ba5ef03c0c108ed5e5f..7740c34702b275602fa0745ce2e6929f855a1de3 100644 --- a/spec/lib/gitlab/import_export/all_models.yml +++ b/spec/lib/gitlab/import_export/all_models.yml @@ -609,3 +609,6 @@ epic: - events - resource_label_events - user_mentions +epic_issue: +- epic +- issue diff --git a/spec/lib/gitlab/import_export/safe_model_attributes.yml b/spec/lib/gitlab/import_export/safe_model_attributes.yml index 1cd3071ac684f28f2e991f0e36dfdc910358f7df..0b5fb7e4a9f306e116f1d4969df4761ac506fba3 100644 --- a/spec/lib/gitlab/import_export/safe_model_attributes.yml +++ b/spec/lib/gitlab/import_export/safe_model_attributes.yml @@ -832,3 +832,6 @@ Epic: - start_date_sourcing_epic_id - due_date_sourcing_epic_id - health_status +EpicIssue: + - id + - relative_position diff --git a/spec/lib/marginalia_spec.rb b/spec/lib/marginalia_spec.rb index db428bb65c490b09f59926f9fb0ef392e32d7442..d4b84c5cdc4267ac6d686860fcdb67402875bf9a 100644 --- a/spec/lib/marginalia_spec.rb +++ b/spec/lib/marginalia_spec.rb @@ -59,7 +59,6 @@ def make_request(correlation_id) "application" => "test", "controller" => "marginalia_test", "action" => "first_user", - "line" => "/spec/support/helpers/query_recorder.rb", "correlation_id" => correlation_id } end @@ -116,7 +115,6 @@ def make_request(correlation_id) { "application" => "sidekiq", "job_class" => "MarginaliaTestJob", - "line" => "/spec/support/sidekiq_middleware.rb", "correlation_id" => sidekiq_job['correlation_id'], "jid" => sidekiq_job['jid'] } @@ -145,7 +143,6 @@ def make_request(correlation_id) let(:component_map) do { "application" => "sidekiq", - "line" => "/lib/gitlab/i18n.rb", "jid" => delivery_job.job_id, "job_class" => delivery_job.arguments.first } diff --git a/spec/models/concerns/cache_markdown_field_spec.rb b/spec/models/concerns/cache_markdown_field_spec.rb index 06d12c14793deee647eb5f015472e490498e473b..697a9e985050c060f80b8ba7a7d20940ba77c2cf 100644 --- a/spec/models/concerns/cache_markdown_field_spec.rb +++ 
b/spec/models/concerns/cache_markdown_field_spec.rb @@ -230,6 +230,26 @@ def thing_subclass(klass, extra_attribute) end end end + + describe '#rendered_field_content' do + let(:thing) { klass.new(description: markdown, description_html: nil, cached_markdown_version: cache_version) } + + context 'when a field can be cached' do + it 'returns the html' do + thing.description = updated_markdown + + expect(thing.rendered_field_content(:description)).to eq updated_html + end + end + + context 'when a field cannot be cached' do + it 'returns nil' do + allow(thing).to receive(:can_cache_field?).with(:description).and_return false + + expect(thing.rendered_field_content(:description)).to eq nil + end + end + end end context 'for Active record classes' do diff --git a/spec/models/group_spec.rb b/spec/models/group_spec.rb index 3531c695236ea5fc907b9969e4b9c57931ec6941..b5ed29189fd2963ad2fc6a6dda8b09020780bbe2 100644 --- a/spec/models/group_spec.rb +++ b/spec/models/group_spec.rb @@ -911,6 +911,16 @@ def setup_group_members(group) subject { group.ci_variables_for('ref', project) } + it 'memoizes the result by ref', :request_store do + expect(project).to receive(:protected_for?).with('ref').once.and_return(true) + expect(project).to receive(:protected_for?).with('other').once.and_return(false) + + 2.times do + expect(group.ci_variables_for('ref', project)).to contain_exactly(ci_variable, protected_variable) + expect(group.ci_variables_for('other', project)).to contain_exactly(ci_variable) + end + end + shared_examples 'ref is protected' do it 'contains all the variables' do is_expected.to contain_exactly(ci_variable, protected_variable) diff --git a/spec/models/project_spec.rb b/spec/models/project_spec.rb index e7deae38b4676f28826ff6c2dd3c766b54c46b69..2b4a832634f0d0bb35f873f9d8d8381662b123a7 100644 --- a/spec/models/project_spec.rb +++ b/spec/models/project_spec.rb @@ -2930,6 +2930,19 @@ end end + it 'memoizes the result by ref and environment', :request_store do + 
scoped_variable = create(:ci_variable, value: 'secret', project: project, environment_scope: 'scoped') + + expect(project).to receive(:protected_for?).with('ref').once.and_return(true) + expect(project).to receive(:protected_for?).with('other').twice.and_return(false) + + 2.times do + expect(project.reload.ci_variables_for(ref: 'ref', environment: 'production')).to contain_exactly(ci_variable, protected_variable) + expect(project.reload.ci_variables_for(ref: 'other')).to contain_exactly(ci_variable) + expect(project.reload.ci_variables_for(ref: 'other', environment: 'scoped')).to contain_exactly(ci_variable, scoped_variable) + end + end + context 'when the ref is not protected' do before do allow(project).to receive(:protected_for?).with('ref').and_return(false) diff --git a/spec/models/resource_label_event_spec.rb b/spec/models/resource_label_event_spec.rb index a92f5ee93e184450f69db79d6a2f03c74554d103..ca887b485a215c925613f10072588f25cbaa1110 100644 --- a/spec/models/resource_label_event_spec.rb +++ b/spec/models/resource_label_event_spec.rb @@ -10,6 +10,10 @@ it_behaves_like 'having unique enum values' + it_behaves_like 'a resource event' + it_behaves_like 'a resource event for issues' + it_behaves_like 'a resource event for merge requests' + describe 'associations' do it { is_expected.to belong_to(:user) } it { is_expected.to belong_to(:issue) } diff --git a/spec/models/resource_weight_event_spec.rb b/spec/models/resource_weight_event_spec.rb index 2f00204512e5c780472e3158e3dda6304a9b067b..11b633e1dcf7c3c1852f52baef3bde37b57274af 100644 --- a/spec/models/resource_weight_event_spec.rb +++ b/spec/models/resource_weight_event_spec.rb @@ -3,6 +3,9 @@ require 'spec_helper' RSpec.describe ResourceWeightEvent, type: :model do + it_behaves_like 'a resource event' + it_behaves_like 'a resource event for issues' + let_it_be(:user1) { create(:user) } let_it_be(:user2) { create(:user) } @@ -11,13 +14,11 @@ let_it_be(:issue3) { create(:issue, author: user2) } describe 
'validations' do - it { is_expected.not_to allow_value(nil).for(:user) } it { is_expected.not_to allow_value(nil).for(:issue) } it { is_expected.to allow_value(nil).for(:weight) } end describe 'associations' do - it { is_expected.to belong_to(:user) } it { is_expected.to belong_to(:issue) } end diff --git a/spec/models/snippet_spec.rb b/spec/models/snippet_spec.rb index 1265b95736dcf108434c5b54640ee47f4d1b6ceb..cb7b996188016841e320d47d059c56dd391e205a 100644 --- a/spec/models/snippet_spec.rb +++ b/spec/models/snippet_spec.rb @@ -632,4 +632,26 @@ def to_json(params = {}) end end end + + describe '#can_cache_field?' do + using RSpec::Parameterized::TableSyntax + + let(:snippet) { create(:snippet, file_name: file_name) } + + subject { snippet.can_cache_field?(field) } + + where(:field, :file_name, :result) do + :title | nil | true + :title | 'foo.bar' | true + :description | nil | true + :description | 'foo.bar' | true + :content | nil | false + :content | 'bar.foo' | false + :content | 'markdown.md' | true + end + + with_them do + it { is_expected.to eq result } + end + end end diff --git a/spec/services/projects/operations/update_service_spec.rb b/spec/services/projects/operations/update_service_spec.rb index 182906a33370d90845983a8f289958e13735ced4..de028ecb6935107217da9d2dddfac320b269e84a 100644 --- a/spec/services/projects/operations/update_service_spec.rb +++ b/spec/services/projects/operations/update_service_spec.rb @@ -298,55 +298,28 @@ manual_configuration: "0" }) end - let(:prometheus_params) do - { - "type" => "PrometheusService", - "title" => nil, - "active" => true, - "properties" => { "api_url" => "http://example.prometheus.com", "manual_configuration" => "0" }, - "push_events" => true, - "issues_events" => true, - "merge_requests_events" => true, - "tag_push_events" => true, - "note_events" => true, - "category" => "monitoring", - "default" => false, - "wiki_page_events" => true, - "pipeline_events" => true, - "confidential_issues_events" => true, - 
"commit_events" => true, - "job_events" => true, - "confidential_note_events" => true, - "deployment_events" => false, - "description" => nil, - "comment_on_event_enabled" => true, - "template" => false - } - end let(:params) do { prometheus_integration_attributes: { - api_url: 'http://new.prometheus.com', - manual_configuration: '1' + 'api_url' => 'http://new.prometheus.com', + 'manual_configuration' => '1' } } end it 'uses Project#find_or_initialize_service to include instance defined defaults and pass them to Projects::UpdateService', :aggregate_failures do project_update_service = double(Projects::UpdateService) - prometheus_update_params = prometheus_params.merge('properties' => { - 'api_url' => 'http://new.prometheus.com', - 'manual_configuration' => '1' - }) expect(project) .to receive(:find_or_initialize_service) .with('prometheus') .and_return(prometheus_service) - expect(Projects::UpdateService) - .to receive(:new) - .with(project, user, { prometheus_service_attributes: prometheus_update_params }) - .and_return(project_update_service) + expect(Projects::UpdateService).to receive(:new) do |project_arg, user_arg, update_params_hash| + expect(project_arg).to eq project + expect(user_arg).to eq user + expect(update_params_hash[:prometheus_service_attributes]).to include('properties' => { 'api_url' => 'http://new.prometheus.com', 'manual_configuration' => '1' }) + expect(update_params_hash[:prometheus_service_attributes]).not_to include(*%w(id project_id created_at updated_at)) + end.and_return(project_update_service) expect(project_update_service).to receive(:execute) subject.execute diff --git a/spec/support/shared_contexts/features/error_tracking_shared_context.rb b/spec/support/shared_contexts/features/error_tracking_shared_context.rb index 48356373c26230a60b94ee076a0646b101f568a7..cbd33dd109b1ac8c2cfa0112fd7a9327a9cc02fe 100644 --- a/spec/support/shared_contexts/features/error_tracking_shared_context.rb +++ 
b/spec/support/shared_contexts/features/error_tracking_shared_context.rb @@ -13,7 +13,7 @@ let(:issue_id) { issue_response['id'] } let(:issue_seen) { 1.year.ago.utc } let(:formatted_issue_seen) { issue_seen.strftime("%Y-%m-%d %-l:%M:%S%p %Z") } - let(:date_received) { 1.month.ago.utc } + let(:date_received) { 32.days.ago.utc } before do request_headers = { 'Authorization' => 'Bearer access_token_123', 'Content-Type' => 'application/json' } diff --git a/spec/support/shared_contexts/policies/group_policy_shared_context.rb b/spec/support/shared_contexts/policies/group_policy_shared_context.rb index 63ebbcb93f9512356e901042409702eefaff6ca8..3a306f80b3cc9bbd5a939571d5180b3ddcb2dd3b 100644 --- a/spec/support/shared_contexts/policies/group_policy_shared_context.rb +++ b/spec/support/shared_contexts/policies/group_policy_shared_context.rb @@ -7,6 +7,7 @@ let_it_be(:maintainer) { create(:user) } let_it_be(:owner) { create(:user) } let_it_be(:admin) { create(:admin) } + let_it_be(:non_group_member) { create(:user) } let_it_be(:group, refind: true) { create(:group, :private, :owner_subgroup_creation_only) } let(:guest_permissions) do diff --git a/spec/support/shared_examples/features/error_tracking_shared_example.rb b/spec/support/shared_examples/features/error_tracking_shared_example.rb index edc1f42f6462de0f8cca237abcf799eb2ada5726..922d2627bce845fa4a73524147ba020f1b092098 100644 --- a/spec/support/shared_examples/features/error_tracking_shared_example.rb +++ b/spec/support/shared_examples/features/error_tracking_shared_example.rb @@ -50,17 +50,21 @@ shared_examples 'error tracking show page' do it 'renders the error details' do + content = page.find(".content") + nav = page.find("nav.breadcrumbs") + header = page.find(".error-details-header") + release_short_version = issue_response['firstRelease']['shortVersion'] - expect(page).to have_content('1 month ago by raven.scripts.runner in main') - expect(page).to have_content(issue_response['metadata']['title']) - 
expect(page).to have_content('level: error') - expect(page).to have_content('Error Details') - expect(page).to have_content('GitLab Issue: https://gitlab.com/gitlab-org/gitlab/issues/1') - expect(page).to have_content("Sentry event: https://sentrytest.gitlab.com/sentry-org/sentry-project/issues/#{issue_id}") - expect(page).to have_content("First seen: 1 year ago (#{formatted_issue_seen}) Release: #{release_short_version}") - expect(page).to have_content('Events: 1') - expect(page).to have_content('Users: 0') + expect(header).to have_content('1 month ago by raven.scripts.runner in main') + expect(content).to have_content(issue_response['metadata']['title']) + expect(content).to have_content('level: error') + expect(nav).to have_content('Error Details') + expect(content).to have_content('GitLab Issue: https://gitlab.com/gitlab-org/gitlab/issues/1') + expect(content).to have_content("Sentry event: https://sentrytest.gitlab.com/sentry-org/sentry-project/issues/#{issue_id}") + expect(content).to have_content("First seen: 1 year ago (#{formatted_issue_seen}) Release: #{release_short_version}") + expect(content).to have_content('Events: 1') + expect(content).to have_content('Users: 0') end it 'renders the stack trace heading' do diff --git a/spec/support/shared_examples/resource_events.rb b/spec/support/shared_examples/resource_events.rb index d7e7349ad6c207f5a372b083f629d15d3a3a468b..963453666c90ff6fccd98d9986a8af25838e6244 100644 --- a/spec/support/shared_examples/resource_events.rb +++ b/spec/support/shared_examples/resource_events.rb @@ -10,8 +10,21 @@ let_it_be(:issue2) { create(:issue, author: user1) } let_it_be(:issue3) { create(:issue, author: user2) } + describe 'importable' do + it { is_expected.to respond_to(:importing?) } + it { is_expected.to respond_to(:imported?) 
} + end + describe 'validations' do it { is_expected.not_to allow_value(nil).for(:user) } + + context 'when importing' do + before do + allow(subject).to receive(:importing?).and_return(true) + end + + it { is_expected.to allow_value(nil).for(:user) } + end end describe 'associations' do diff --git a/spec/support_specs/helpers/active_record/query_recorder_spec.rb b/spec/support_specs/helpers/active_record/query_recorder_spec.rb index 48069c6a76659f5b6e2f22f2591a19d881c9fed2..0827ce37b07b5ed501971f362c0b4ae911924284 100644 --- a/spec/support_specs/helpers/active_record/query_recorder_spec.rb +++ b/spec/support_specs/helpers/active_record/query_recorder_spec.rb @@ -14,9 +14,6 @@ class TestQueries < ActiveRecord::Base TestQueries.first end - # Test first_only flag works as expected - expect(control.find_query(/.*query_recorder_spec.rb.*/, 0, first_only: true)) - .to eq(control.find_query(/.*query_recorder_spec.rb.*/, 0).first) # Check #find_query expect(control.find_query(/.*/, 0).size) .to eq(control.data.keys.size) @@ -32,9 +29,7 @@ class TestQueries < ActiveRecord::Base # Ensure memoization value match the raw value above expect(control.count).to eq(control.log.size) # Ensure we have only two sources of queries - expect(control.data.keys.size).to eq(2) - # Ensure we detect only queries from this file - expect(control.data.keys.find_all { |i| i.match(/query_recorder_spec.rb/) }.count).to eq(2) + expect(control.data.keys.size).to eq(1) end end end