diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index dbd44b4a..5c7190be 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1,3 @@ -*.ts @coderabbitai/typescript-reviewers +docusaurus.config.ts @coderabbitai/typescript-reviewers +src/**/*.ts @coderabbitai/typescript-reviewers +src/**/*.tsx @coderabbitai/react-reviewers diff --git a/README.md b/README.md index bd76c6c5..8167aa97 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ Welcome to the official docs for [**CodeRabbit**](https://coderabbit.ai), the co 2. Clone this repository: ```sh - git clone https://github.com/coderabbit-ai/coderabbit-docs.git + git clone https://github.com/coderabbitai/coderabbit-docs.git cd coderabbit-docs ``` diff --git a/docs/changelog.md b/docs/changelog.md index d273ffcb..9fe435ad 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -5,6 +5,28 @@ description: The latest updates and changes to CodeRabbit. sidebar_position: 13 --- +## July 17, 2025 + +### Enhanced Reporting Capabilities + +We're excited to announce significant improvements to our reporting system that will make your automated reports more powerful and actionable! + +#### Exclusion Filters with NOT_IN Option + +Reports now support exclusion filters using the `NOT_IN` option, allowing you to filter out pull requests based on their repository, label, user, or team. This gives you more granular control over what appears in your reports, helping you focus on the most relevant information. + +#### Next Steps Section in Daily Standup Reports + +We've enhanced the default daily standup report template to include a "Next Steps" section. This provides clear guidance on what actions authors and reviewers should take regarding each pull request or change, making your reports more actionable and helping teams stay on track. + +#### CI/CD GitHub Action Check Status Awareness + +Reports now include CI/CD GitHub Action check status as a data source. 
Your reports will be aware of which CI/CD checks are failing or passing, giving you better visibility into the health of your pull requests and helping identify potential issues before they become blockers. + +#### Score Card Custom Report Option + +We've added a new optional score card report feature that allows you to grade and create report cards for your developers. This custom report optional data source provides a structured way to evaluate developer performance and contributions. See our [custom reports documentation](/guides/custom-reports#remove-prs-without-a-score-cardchart-bot-comment) for more details on how to implement score cards in your reports. + ## July 10, 2025 ### Enhanced Python Static Analysis: nbqa Support for Jupyter Notebooks diff --git a/docs/guides/config-vscode.md b/docs/guides/config-vscode.md index 02f60d8c..41afc157 100644 --- a/docs/guides/config-vscode.md +++ b/docs/guides/config-vscode.md @@ -28,17 +28,21 @@ The **Agent Type** setting lets you choose the extension's response to using the - **Native**: The extension prompts the AI agent associated with your IDE to apply the suggested fix. - This works only with VSCode, using Copilot. If you have this option selected when using a different IDE, then the extension instead copies the prompt to your clipboard. + This works only with VSCode(using Copilot) and Cursor(you have to start the task). If you have this option selected when using a different IDE, then the extension instead copies the prompt to your clipboard. -- **Claude Code**: The extension opens the Terminal pane of your IDE and tries to use the `claude` command-line program to apply the suggested fix to your code. You need to have Claude Code installed for this option to be effective. +- **Claude Code**: The extension opens the Terminal pane of your IDE and tries to use the `claude` command-line program to apply the suggested fix to your code. 
You need to have [Claude Code](https://www.anthropic.com/claude-code) installed for this option to be effective. -- **Codex CLI**: The extension opens the Terminal pane of your IDE and tries to use the `codex` command-line program to apply the suggested fix to your code. You need to have Codex CLI installed for this option to be effective. +- **Codex CLI**: The extension opens the Terminal pane of your IDE and tries to use the `codex` command-line program to apply the suggested fix to your code. You need to have [Codex CLI](https://github.com/openai/codex) installed for this option to be effective. -- **Cline**: The extension opens the `Cline` sidebar and runs a task to apply the suggested fix to your code. You need to have the `Cline` extension installed for this option to be effective. +- **OpenCode**: The extension opens the Terminal pane of your IDE and tries to use the `opencode` command-line program to apply the suggested fix to your code. You need to have [OpenCode](https://opencode.ai) installed for this option to be effective. -- **Roo**: The extension opens the `Roo` sidebar and runs a task to apply the suggested fix to your code. You need to have the `Roo` extension installed for this option to be effective. +- **Cline**: The extension opens the `Cline` sidebar and runs a task to apply the suggested fix to your code. You need to have the [Cline](https://cline.bot/) extension installed for this option to be effective. -- **Kilo Code**: The extension opens the `Kilo Code` sidebar and runs a task to apply the suggested fix to your code. You need to have the `Kilo Code` extension installed for this option to be effective. +- **Roo**: The extension opens the `Roo` sidebar and runs a task to apply the suggested fix to your code. You need to have the [Roo](https://github.com/RooCodeInc/Roo-Code) extension installed for this option to be effective. 
+ +- **Kilo Code**: The extension opens the `Kilo Code` sidebar and runs a task to apply the suggested fix to your code. You need to have the [Kilo Code](https://kilocode.ai/) extension installed for this option to be effective. + +- **Augment Code**: The extension opens the `Augment Code` sidebar with the prompt to apply the suggested fix to your code, and you can start the task. You need to have the [Augment Code](https://www.augmentcode.com/) extension installed for this option to be effective. - **Clipboard**: The extension copies prompt text describing the suggested fix to your clipboard. From there, you can manually paste the prompt into the coding AI agent that you use with your IDE. diff --git a/docs/guides/custom-reports.md b/docs/guides/custom-reports.md index a22b359e..b2a10504 100644 --- a/docs/guides/custom-reports.md +++ b/docs/guides/custom-reports.md @@ -117,6 +117,14 @@ Contained within the `` tag. - ``: datetime - The date and time the comment was last updated. - ``: markdown - The content of the comment. +#### CI/CD Check Status + +- ``: array of check objects - Contains all CI/CD checks for the PR. _GitHub Only_. +- ``: object - Each individual check is wrapped in this tag and is an object with the following properties: + - ``: string - The name of the CI/CD check. + - ``: string - The status of the check (e.g., "success", "failure", "in_progress", "canceled"). + - ``: string - The URL to view the detailed results of the check. + Here's an example prompt that uses these data points: ```text @@ -299,7 +307,7 @@ Issues and tickets brings in conversations, descriptions, and comments from Jira This option gives you the ability to create a report limited only to pull requests containing a "Score Card" or "Score Chart" bot comment from CodeRabbit or other bots. To enable issues and tickets you must include the tag `` in your prompt. 
-> **IMPORTANT:** This will automatically remove any pull requests from your reports if they do not contain a "Score Card" or "Score Chart" bot comment. Using this option without setting up a flow to create these comments will result in `No new pull request activity in the last XYZ hours` errors. Do not enable this option unless you have created a "Score Card" or "Score Chart" bot comment flow. +> **IMPORTANT:** This will automatically remove any pull requests from your reports if they do not contain a "Score Card" or "Score Chart" bot comment. Using this option without setting up a flow to create these comments will result in `No new pull request activity in the last XYZ hours` errors. Do not enable this option unless you have asked CodeRabbit to create a "Score Card" through a comment or implemented the "Score Chart" bot comment flow below. For example you can ask coderabbit to check serveral conditions on a pull request and produce a "Score Chart": @@ -340,7 +348,7 @@ Generate a weekly code review report for the author of this pull request only fo - `1` → Passed **Final Score Calculation:** -Combine the scores from the parameters above to derive the final code quality score (out of 5). +Combine the scores from the parameters above to derive the final code quality score (out of 9). **Output Format:** Provide the final report in a table format with the following columns (use shorthand notations), be sure to include this list at the top above the chart in the "Column Notation" section so users understand what the columns mean: diff --git a/docs/guides/ondemand-reports.md b/docs/guides/ondemand-reports.md index ee385622..af380164 100644 --- a/docs/guides/ondemand-reports.md +++ b/docs/guides/ondemand-reports.md @@ -14,7 +14,7 @@ import ProPlanNotice from '@site/src/components/ProPlanNotice.mdx'; This page is about using the CodeRabbit API to generate on-demand reports about your organization's usage of CodeRabbit. 
For a conceptual overview of reports in CodeRabbit, see [Generate reports](/guides/reports-overview). -If you're new to CodeRabbit's reporting features, then we recommend starting with [Scheduled reports](/guides/scheduled-reports) to understand the available options and capabilities. +If you're new to CodeRabbit's reporting features, then we recommend starting with [Scheduled reports](/guides/scheduled-reports) to understand the available options and capabilities. In almost every scenario we recommend using the **Scheduled Reports** option. The **On-demand Report** does not have any additional benefits over the **Scheduled Reports** and has many limitations. ## API Access diff --git a/docs/guides/reports-overview.md b/docs/guides/reports-overview.md index 4d8759ed..b15329f0 100644 --- a/docs/guides/reports-overview.md +++ b/docs/guides/reports-overview.md @@ -17,16 +17,16 @@ You can track and analyze pull request activity across your organization's repos There are two ways to generate reports: -- **[Scheduled Reports](/guides/scheduled-reports)**: Set up automated, recurring reports that are delivered to your team on a set schedule. Supported delivery channels include the following: +- **[Scheduled Reports](/guides/scheduled-reports)**: Set up automated, recurring reports that are delivered to your team on a set schedule. **Use this option if you want to adjust your settings and send a report more than once**. Supported delivery channels include the following: - Email - Discord - Slack - Teams -- **[On-demand Reports](/guides/ondemand-reports)**: If you require customized integration of reports with your own tools and workflows, then you can generate reports programmatically through our API. +- **[On-demand Reports](/guides/ondemand-reports)**: If you want to test a report format and produce a single report once, try the "On-demand Reports" option. 
You can also generate reports programmatically through our API, but we do not recommend this as the API reporting is in beta. If you want to run an **On-demand Report** more than once then you must use the **Scheduled Reports** option. -For your first reports, we recommend exploring scheduled reports. Its web-based UI can help you get familiar with the options available to you. +In almost every scenario we recommend using the **Scheduled Reports** option. The **On-demand Report** does not have any additional benefits over the **Scheduled Reports** and has many limitations. ## Customize reports using natural language {#customize} diff --git a/docs/guides/scheduled-reports.md b/docs/guides/scheduled-reports.md index 34c59032..cef41d46 100644 --- a/docs/guides/scheduled-reports.md +++ b/docs/guides/scheduled-reports.md @@ -52,11 +52,12 @@ Choose a time that works for all team members, especially for distributed teams ### Report Parameters -Reports can be filtered using multiple parameters: +Reports can be filtered using multiple parameters: match pull requests using the **IN** option and exclude pull requests using the **NOT_IN** option: - **Repositories**: Select specific repositories to monitor - **Labels**: Filter by GitHub labels with operators: - IN: Match any selected label + - NOT_IN: Exclude any PR with a selected label - ALL: Match all selected labels - **Users**: Filter by specific GitHub users - **Teams**: Filter by organization teams diff --git a/docs/platforms/azure-devops.md b/docs/platforms/azure-devops.md index 12b70765..b88ebcd7 100644 --- a/docs/platforms/azure-devops.md +++ b/docs/platforms/azure-devops.md @@ -94,9 +94,8 @@ Follow these steps to generate the token: 5. Select the organization you want to use the token with or select "All accessible organizations." 6. Enter a name and an expiry date for the token. -7. We need to have read & write access to "Work Items" & "Code" to post reviews on pull requests. 
If you are on the Pro tier also add "Build" access for pipeline - failure remediation. -8. Click "Create" +7. Grant **Read, write, & manage** access to "Work Items" and "Code". CodeRabbit needs these permissions to post code reviews on pull requests. +8. If you subscribe to CodeRabbit Pro, then you can also grant **Read** access to "Build" for pipeline failure remediation. +9. Click "Create" ![CodeRabbit azure devOps personal access token creation form](/img/integrations/azure-access-token.png) diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md index bd0b3e8e..5b59933f 100644 --- a/docs/reference/configuration.md +++ b/docs/reference/configuration.md @@ -3552,11 +3552,11 @@ markdownlint-cli2 is a static analysis tool to enforce standards and consistency markdownlint-cli2 is a static analysis tool to enforce standards and consistency for Markdown files. -### OXC +### Oxlint -OXC is a JavaScript/TypeScript linter written in Rust. +Oxlint is a JavaScript/TypeScript linter for OXC written in Rust. -#### Enable OXC +#### Enable Oxlint @@ -3564,7 +3564,7 @@ OXC is a JavaScript/TypeScript linter written in Rust. Location - Review > Tools > Enable OXC + Review > Tools > Enable Oxlint Default @@ -3591,7 +3591,7 @@ OXC is a JavaScript/TypeScript linter written in Rust. -OXC is a JavaScript/TypeScript linter written in Rust. +Oxlint is a JavaScript/TypeScript linter for OXC written in Rust. ### PHPStan diff --git a/docs/self-hosted/azure-devops.md b/docs/self-hosted/azure-devops.md index a843e1a7..7fc28ad8 100644 --- a/docs/self-hosted/azure-devops.md +++ b/docs/self-hosted/azure-devops.md @@ -103,6 +103,9 @@ LLM_TIMEOUT=360000 AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= AWS_REGION= +# optionally, use cross-region inference to access models in other regions +# if this is set to `true`, CodeRabbit will access models from `us`, `eu`, or `ap` regions based on the AWS_REGION value. 
+AWS_USE_CROSS_REGION_INFERENCE=[] TEMP_PATH=/cache @@ -127,6 +130,8 @@ LINEAR_PAT=[] ENABLE_WEB_SEARCH=[true] PERPLEXITY_API_KEY=[] + +YAML_CONFIG=[] ``` :::note @@ -134,6 +139,7 @@ PERPLEXITY_API_KEY=[] - If you are using Azure OpenAI, verify that the model deployment names are in the .env file. - Values marked with [] are not optional to provide. - You can generate `CODERABBIT_API_KEY` from CodeRabbit UI -> Organizations Settings -> API Keys. +- `YAML_CONFIG` is an optional configuration file that can be used to customize CodeRabbit's behavior at the deployment level. It takes the same format as the [CodeRabbit YAML configuration](/docs/getting-started/configure-coderabbit.md) file. It requires the entire YAML file to be in an escaped string format, for example, `YAML_CONFIG="key1: value1\nkey2: value2"`. You can use [Escape YAML](https://escapeyaml.dev/) to generate the escaped string. ::: diff --git a/docs/self-hosted/bitbucket.md b/docs/self-hosted/bitbucket.md index 7e7faf2b..d1ef7492 100644 --- a/docs/self-hosted/bitbucket.md +++ b/docs/self-hosted/bitbucket.md @@ -95,6 +95,9 @@ LLM_TIMEOUT=360000 AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= AWS_REGION= +# optionally, use cross-region inference to access models in other regions +# if this is set to `true`, CodeRabbit will access models from `us`, `eu`, or `ap` regions based on the AWS_REGION value. +AWS_USE_CROSS_REGION_INFERENCE=[] # System Configuration TEMP_PATH=/cache @@ -122,6 +125,8 @@ LINEAR_PAT=[] ENABLE_WEB_SEARCH=[true] PERPLEXITY_API_KEY=[] + +YAML_CONFIG=[] ``` :::note @@ -129,6 +134,7 @@ PERPLEXITY_API_KEY=[] - If you are using Azure OpenAI, verify that the model deployment names are in the .env file. Values marked with [] are optional and can be omitted if the feature is not needed. - You can generate `CODERABBIT_API_KEY` from CodeRabbit UI -> Organizations Settings -> API Keys. 
+- `YAML_CONFIG` is an optional configuration file that can be used to customize CodeRabbit's behavior at the deployment level. It takes the same format as the [CodeRabbit YAML configuration](/docs/getting-started/configure-coderabbit.md) file. It requires the entire YAML file to be in an escaped string format, for example, `YAML_CONFIG="key1: value1\nkey2: value2"`. You can use [Escape YAML](https://escapeyaml.dev/) to generate the escaped string. ::: diff --git a/docs/self-hosted/github.md b/docs/self-hosted/github.md index 3031e45c..c91d3cbf 100644 --- a/docs/self-hosted/github.md +++ b/docs/self-hosted/github.md @@ -106,6 +106,9 @@ LLM_TIMEOUT=360000 AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= AWS_REGION= +# optionally, use cross-region inference to access models in other regions +# if this is set to `true`, CodeRabbit will access models from `us`, `eu`, or `ap` regions based on the AWS_REGION value. +AWS_USE_CROSS_REGION_INFERENCE=[] # if using Anthropic LLM_PROVIDER=anthropic @@ -140,6 +143,8 @@ LINEAR_PAT=[] ENABLE_WEB_SEARCH=[true] PERPLEXITY_API_KEY=[] + +YAML_CONFIG=[] ``` :::note @@ -150,6 +155,7 @@ PERPLEXITY_API_KEY=[] - For `GITHUB_HOSTNAME`, use GitHub Enterprise server's hostname, for example, “github.acme-inc.com” - You can generate `CODERABBIT_API_KEY` from CodeRabbit UI -> Organizations Settings -> API Keys. - When `ENABLE_LEARNINGS` is set to `true`, CodeRabbit will use `CODERABBIT_API_KEY` to store learnings on our servers. +- `YAML_CONFIG` is an optional configuration file that can be used to customize CodeRabbit's behavior at the deployment level. It takes the same format as the [CodeRabbit YAML configuration](/docs/getting-started/configure-coderabbit.md) file. It requires the entire YAML file to be in an escaped string format, for example, `YAML_CONFIG="key1: value1\nkey2: value2"`. You can use [Escape YAML](https://escapeyaml.dev/) to generate the escaped string. 
::: diff --git a/docs/self-hosted/gitlab.md b/docs/self-hosted/gitlab.md index cd35c19f..c0535646 100644 --- a/docs/self-hosted/gitlab.md +++ b/docs/self-hosted/gitlab.md @@ -37,13 +37,46 @@ Consult official CodeRabbitAI documentation for a detailed [guide](https://docs. 1. **Navigate to Add Webhook Page**: Go to the webhook configuration page in the desired GitLab project. 2. **Add Webhook URL**: Enter the URL pointing to the CodeRabbit service, followed by `/gitlab_webhooks` (e.g., `http://127.0.0.1:8080/gitlab_webhooks`). 3. **Generate and Save Secret Token**: Generate a secret token, add it to the webhook, and store it securely. This will be needed for the `.env` file as `GITLAB_WEBHOOK_SECRET` (you can use a single secret token for all projects). -4. Select triggers: +4. **Select triggers**: - Push events - Comments - Issues events - Merge request events +## Add Webhook Using a Script + +We have a convenient [script](/code/gitlab-webhook.sh) to help you add webhooks to a project or all projects under a group in a GitLab instance. + +```bash +# Make sure the script is executable: +chmod +x gitlab-webhook.sh +``` + +Example usage: + +```bash +# PAT example (header auto-detected) +export GITLAB_TOKEN="glpat-xxxxx" +./gitlab-add-webhook.sh \ + -h "gitlab.example.com" -u "http:///gitlab_webhooks" \ + -s "mySecret" -p 42 + +# PAT example (explicit header) +./gitlab-add-webhook.sh \ + -h "gitlab.example.com" -u "http:///gitlab_webhooks" \ + -s "mySecret" -g "mygroup/mysubgroup/myproject" \ + -t "glpat-xxxxx" \ + -A "PRIVATE-TOKEN" + +# OAuth token with explicit header +./gitlab-add-webhook.sh \ + -h "gitlab.example.com" -u "http:///gitlab_webhooks" \ + -s "mySecret" -g "company/backend" \ + -t "eyJhbGciOi..." 
\ + -A "Authorization: Bearer" +``` + ## Prepare a `.env` file Create a `.env` file with the following content: @@ -101,6 +134,9 @@ LLM_TIMEOUT=360000 AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= AWS_REGION= +# optionally, use cross-region inference to access models in other regions +# if this is set to `true`, CodeRabbit will access models from `us`, `eu`, or `ap` regions based on the AWS_REGION value. +AWS_USE_CROSS_REGION_INFERENCE=[] # if using Anthropic LLM_PROVIDER=anthropic @@ -134,6 +170,8 @@ LINEAR_PAT=[] ENABLE_WEB_SEARCH=[true] PERPLEXITY_API_KEY=[] + +YAML_CONFIG=[] ``` :::note @@ -141,6 +179,7 @@ PERPLEXITY_API_KEY=[] - If you are using Azure OpenAI, verify that the model deployment names are in the .env file. - Values marked with [] are not optional to provide. - You can generate `CODERABBIT_API_KEY` from CodeRabbit UI -> Organizations Settings -> API Keys. +- `YAML_CONFIG` is an optional configuration file that can be used to customize CodeRabbit's behavior at the deployment level. It takes the same format as the [CodeRabbit YAML configuration](/docs/getting-started/configure-coderabbit.md) file. It requires the entire YAML file to be in an escaped string format, for example, `YAML_CONFIG="key1: value1\nkey2: value2"`. You can use [Escape YAML](https://escapeyaml.dev/) to generate the escaped string. ::: diff --git a/docs/tools/dotenv.md b/docs/tools/dotenv.md index cac2613f..a3a2dfd2 100644 --- a/docs/tools/dotenv.md +++ b/docs/tools/dotenv.md @@ -12,6 +12,15 @@ import ProPlanNotice from '@site/src/components/ProPlanNotice.mdx'; [Dotenv Linter](https://github.com/dotenv-linter/dotenv-linter) is a lightning-fast linter for `.env` files. It helps ensure your environment files are consistent, typo-free, and follow best practices. +## Files + +Dotenv Linter will run on files with the following patterns: + +- `**/.env` +- `**/.env.*` + +We will not run against files that do not start with `.env` (e.g., `test.env`). 
However `.env.dev` or `.env.local` is fine. + :::note Dotenv Linter does not require configuration to run and automatically anlysises `.env` files. If no configuration file is found, it will use default settings. diff --git a/docs/tools/shopify-cli.md b/docs/tools/shopify-cli.md index 934a6345..cc574723 100644 --- a/docs/tools/shopify-cli.md +++ b/docs/tools/shopify-cli.md @@ -12,6 +12,31 @@ import ProPlanNotice from '@site/src/components/ProPlanNotice.mdx'; [Shopify CLI](https://github.com/Shopify/cli) is a command-line tool that helps you build Shopify apps, themes, and custom storefronts. It provides functionality for initializing, building, developing, and deploying Shopify projects. +## Requirements + +The tool only runs when the following conditions are met: + +### File Types + +- Only processes pull requests changing `*.liquid` files + +### Configuration Files + +- Requires either `.theme-check.yml` or `.theme-check.yaml` configuration file in the project root + +### Directory Structure + +- Requires the standard Shopify theme directory structure at the project root: + - `assets/` + - `config/` + - `layout/` + - `locales/` + - `sections/` + - `snippets/` + - `templates/` + +If any of these requirements are not met, the tool will not run. 
+ ## Validation Rules The tool checks for: diff --git a/sidebars.ts b/sidebars.ts index 3cc53ff0..0e93a242 100644 --- a/sidebars.ts +++ b/sidebars.ts @@ -176,16 +176,21 @@ const sidebars: SidebarsConfig = { "tools/list", "tools/actionlint", "tools/biome", + "tools/brakeman", "tools/buf", + "tools/checkmake", "tools/checkov", "tools/circleci", + "tools/clippy", "tools/cppcheck", "tools/detekt", + "tools/dotenv", "tools/eslint", "tools/flake8", "tools/gitleaks", "tools/golangci-lint", "tools/hadolint", + "tools/htmlhint", "tools/languagetool", "tools/luacheck", "tools/markdownlint", @@ -196,6 +201,7 @@ const sidebars: SidebarsConfig = { "tools/pipeline-remediation", "tools/pmd", "tools/prisma-lint", + "tools/pylint", "tools/regal", "tools/rubocop", "tools/ruff", diff --git a/static/code/gitlab-webhook.sh b/static/code/gitlab-webhook.sh new file mode 100755 index 00000000..87c3be7d --- /dev/null +++ b/static/code/gitlab-webhook.sh @@ -0,0 +1,297 @@ +#!/usr/bin/env bash + +## gitlab-webhook.sh +# Add a webhook to one project, or every project in a subgroup tree. + +## Example usage: +# Make sure the script is executable: +# chmod +x gitlab-webhook.sh + +# PAT auto-detected header +# export GITLAB_TOKEN="glpat-xxxxx" +# ./gitlab-add-webhook.sh \ +# -h "gitlab.example.com" -u "https://ci.example.com/gitlab-hook" \ +# -s "mySecret" -p 42 + +# PAT with explicit header +# ./gitlab-add-webhook.sh \ +# -h "gitlab.example.com" -u "https://ci.example.com/gitlab-hook" \ +# -s "mySecret" -g "mygroup/mysubgroup/myproject" \ +# -t "glpat-qj5s..." \ +# -A "PRIVATE-TOKEN" + +# OAuth token with explicit header +# ./gitlab-add-webhook.sh \ +# -h "gitlab.example.com" -u "https://ci.example.com/gitlab-hook" \ +# -s "mySecret" -g "company/backend" \ +# -t "eyJhbGciOi..." \ +# -A "Authorization: Bearer" + + +set -euo pipefail + +usage() { + cat < -u -s \\ + [-t ] [-A ] [-p | -g ] [-v] + +Required: + -h GitLab host (e.g. 
gitlab.example.com) + -u Webhook endpoint URL to receive POSTs + -s Webhook secret token (used for signature verification) + +Authentication (one of): + -t Access token (PAT, project, group or OAuth). If omitted, \$GITLAB_TOKEN is used + -A Auth header to use. Default detects: + PAT → "PRIVATE-TOKEN" + anything else → "Authorization: Bearer" + +Scope (choose one): + -p Project ID or full path (e.g. 42 or group/app) + -g Group ID or full path, recurse through all subgroups & projects + +Options: + -v Verbose output (show individual project IDs in final summary) +EOF + exit 1 +} + +HOST="" HOOK_URL="" HOOK_SECRET="" +TOKEN="${GITLAB_TOKEN:-}" AUTH_HEADER="" +PROJECT="" GROUP="" VERBOSE=false + +while getopts "h:u:s:t:A:p:g:v" opt; do + case "$opt" in + h) HOST=$OPTARG ;; + u) HOOK_URL=$OPTARG ;; + s) HOOK_SECRET=$OPTARG ;; + t) TOKEN=$OPTARG ;; + A) AUTH_HEADER=$OPTARG ;; + p) PROJECT=$OPTARG ;; + g) GROUP=$OPTARG ;; + v) VERBOSE=true ;; + *) usage ;; + esac +done + +# Mandatory checks +[[ -z $HOST || -z $HOOK_URL || -z $HOOK_SECRET ]] && usage +[[ -n $PROJECT && -n $GROUP ]] && usage +[[ -z $PROJECT && -z $GROUP ]] && usage + +# Token handling +if [[ -z $TOKEN ]]; then + echo "[ERROR] No access token provided. 
Use -t or set \$GITLAB_TOKEN" >&2 + exit 1 +fi + +# Choose header if not forced +if [[ -z $AUTH_HEADER ]]; then + if [[ $TOKEN == glpat-* || $TOKEN == "PAT-"* ]]; then + AUTH_HEADER="PRIVATE-TOKEN" + else + AUTH_HEADER="Authorization: Bearer" + fi +fi + +API="https://${HOST}/api/v4" +CURL_BASE=(curl -sSf --header "${AUTH_HEADER}: ${TOKEN}") + +# Track processed projects to avoid duplicates +declare -A PROCESSED_PROJECTS +# Track projects where webhooks were successfully added +WEBHOOK_PROJECTS=() +# Track projects where webhooks already existed +EXISTING_WEBHOOK_PROJECTS=() +# Progress counters +TOTAL_PROJECTS_FOUND=0 +PROJECTS_PROCESSED=0 + +############################################################################## +# Helpers +############################################################################## +url_encode() { + local string="$1" + # URL encode the string using printf and sed + printf '%s' "$string" | sed 's/\//%2F/g; s/ /%20/g; s/@/%40/g; s/:/%3A/g; s/#/%23/g; s/?/%3F/g; s/&/%26/g; s/=/%3D/g; s/+/%2B/g' +} + +# Function to handle paginated API calls +fetch_paginated() { + local url=$1 + local page=1 + local per_page=100 + + while true; do + local paginated_url="${url}?per_page=${per_page}&page=${page}" + + # Add existing query params if they exist + if [[ "$url" == *"?"* ]]; then + paginated_url="${url}&per_page=${per_page}&page=${page}" + fi + + local response + response=$("${CURL_BASE[@]}" "$paginated_url" 2>/dev/null) || { + echo "[ERROR] Failed to fetch page $page from $url" >&2 + return 1 + } + + # Check if response is empty array or null + if [[ "$response" == "[]" || "$response" == "null" ]]; then + break + fi + + # Extract results from current page + local page_results + page_results=$(echo "$response" | jq -r '.[].id' 2>/dev/null) || { + echo "[ERROR] Failed to parse JSON response from page $page" >&2 + return 1 + } + + # If no results on this page, we're done + if [[ -z "$page_results" ]]; then + break + fi + + # Count projects found and 
show progress + local page_count + page_count=$(echo "$page_results" | wc -l) + TOTAL_PROJECTS_FOUND=$((TOTAL_PROJECTS_FOUND + page_count)) + echo "[PROGRESS] Found $page_count projects on page $page (total: $TOTAL_PROJECTS_FOUND)" >&2 + + # Output page results + echo "$page_results" + + # If we got less than per_page results, we're on the last page + local item_count + item_count=$(echo "$response" | jq '. | length' 2>/dev/null) || 0 + if [[ "$item_count" -lt "$per_page" ]]; then + break + fi + + ((page++)) + done +} + +create_hook() { + local pid=$1 + + # Skip if already processed + if [[ -n "${PROCESSED_PROJECTS[$pid]:-}" ]]; then + return 0 + fi + + # Mark as processed + PROCESSED_PROJECTS[$pid]=1 + PROJECTS_PROCESSED=$((PROJECTS_PROCESSED + 1)) + + local encoded_pid + # URL encode if pid is not purely numeric + if [[ $pid =~ ^[0-9]+$ ]]; then + encoded_pid=$pid + else + encoded_pid=$(url_encode "$pid") + fi + + # Check if webhook already exists + local existing_webhooks + existing_webhooks=$("${CURL_BASE[@]}" "${API}/projects/${encoded_pid}/hooks" 2>/dev/null) || { + echo "[ERROR] Failed to fetch existing webhooks for project $pid" >&2 + return 1 + } + + # Check if our webhook URL already exists + if echo "$existing_webhooks" | jq -e --arg url "$HOOK_URL" '.[] | select(.url == $url)' >/dev/null 2>&1; then + [[ "$VERBOSE" == "true" ]] && echo "[INFO] Webhook already exists for project: $pid" >&2 + EXISTING_WEBHOOK_PROJECTS+=("$pid") + return 0 + fi + + [[ "$VERBOSE" == "true" ]] && echo "[INFO] Adding webhook to project: $pid" >&2 + + "${CURL_BASE[@]}" --request POST \ + --data-urlencode "url=${HOOK_URL}" \ + --data "token=${HOOK_SECRET}" \ + --data "push_events=true" \ + --data "note_events=true" \ + --data "issues_events=true" \ + --data "merge_requests_events=true" \ + --data "enable_ssl_verification=true" \ + "${API}/projects/${encoded_pid}/hooks" \ + >/dev/null + + # Track successful webhook creation + WEBHOOK_PROJECTS+=("$pid") +} + +traverse_group() { + 
local gid=$1 + local encoded_gid + # URL encode if gid is not purely numeric + if [[ $gid =~ ^[0-9]+$ ]]; then + encoded_gid=$gid + else + encoded_gid=$(url_encode "$gid") + fi + + # projects (includes nested sub-groups) - with pagination + while IFS= read -r pid; do + [[ -n "$pid" ]] && create_hook "$pid" + done < <( + fetch_paginated "${API}/groups/${encoded_gid}/projects?include_subgroups=true" + ) + + # recurse explicit subgroups (older GitLab) - with pagination + while IFS= read -r sg; do + [[ -n "$sg" ]] && traverse_group "$sg" + done < <( + fetch_paginated "${API}/groups/${encoded_gid}/subgroups" + ) +} + +############################################################################## +# Main +############################################################################## +echo "[INFO] Starting webhook processing..." >&2 + +if [[ -n $PROJECT ]]; then + echo "[INFO] Processing single project: $PROJECT" >&2 + create_hook "$PROJECT" +else + echo "[INFO] Processing group and subgroups: $GROUP" >&2 + traverse_group "$GROUP" +fi + +echo "[INFO] Finished processing all projects" >&2 + +# Print final summary +total_projects=$((${#WEBHOOK_PROJECTS[@]} + ${#EXISTING_WEBHOOK_PROJECTS[@]})) + +if [[ $total_projects -eq 0 ]]; then + echo "[INFO] No projects were processed" +else + if [[ ${#WEBHOOK_PROJECTS[@]} -gt 0 ]]; then + if [[ "$VERBOSE" == "true" ]]; then + echo "[INFO] Webhooks installed successfully on ${#WEBHOOK_PROJECTS[@]} project(s):" + for pid in "${WEBHOOK_PROJECTS[@]}"; do + echo " - Project ID: $pid" + done + else + echo "[INFO] Webhooks installed successfully on ${#WEBHOOK_PROJECTS[@]} project(s)" + fi + fi + + if [[ ${#EXISTING_WEBHOOK_PROJECTS[@]} -gt 0 ]]; then + if [[ "$VERBOSE" == "true" ]]; then + echo "[INFO] Webhooks already existed on ${#EXISTING_WEBHOOK_PROJECTS[@]} project(s):" + for pid in "${EXISTING_WEBHOOK_PROJECTS[@]}"; do + echo " - Project ID: $pid" + done + else + echo "[INFO] Webhooks already existed on ${#EXISTING_WEBHOOK_PROJECTS[@]} 
project(s)" + fi + fi + + echo "[INFO] Total projects processed: $total_projects" +fi diff --git a/static/schema/schema.v2.json b/static/schema/schema.v2.json index 4dcaeba6..afde59e4 100644 --- a/static/schema/schema.v2.json +++ b/static/schema/schema.v2.json @@ -374,6 +374,81 @@ "additionalProperties": false, "default": {} }, + "pre_merge_checks": { + "type": "object", + "properties": { + "docstrings": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": ["off", "warning", "error"], + "default": "warning", + "description": "Mode | Level of enforcement for docstring coverage check. Warning only generates a warning and does not require the user to resolve the check. While error requires the user to resolve issues before merging pull request." + }, + "threshold": { + "type": "number", + "minimum": 0, + "maximum": 100, + "default": 80, + "description": "Percentage threshold for docstring coverage check." + } + }, + "additionalProperties": false, + "default": {}, + "description": "Docstring Coverage | Checks if the code has sufficient docstrings." + }, + "title": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": ["off", "warning", "error"], + "default": "warning", + "description": "Mode | Level of enforcement for pull request title check. Warning only generates a warning and does not require the user to resolve the check. While error requires the user to resolve issues before merging pull request." + }, + "requirements": { + "type": "string", + "default": "", + "description": "Requirements | Requirements for the pull request title. Example: 'Title should be concise and descriptive, ideally under 50 characters.'" + } + }, + "additionalProperties": false, + "default": {}, + "description": "Title Check | Checks if the pull request title is appropriate and follows best practices." 
+ }, + "description": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": ["off", "warning", "error"], + "default": "warning", + "description": "Mode | Level of enforcement for pull request description check. Warning only generates a warning and does not require the user to resolve the check. While error requires the user to resolve issues before merging pull request." + } + }, + "additionalProperties": false, + "default": {}, + "description": "Description Check | Checks if the pull request description is appropriate and follows best practices." + }, + "issue_assessment": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": ["off", "warning", "error"], + "default": "warning", + "description": "Mode | Level of enforcement for linked issue assessment. Warning only generates a warning and does not require the user to resolve the check. While error requires the user to resolve issues before merging pull request." + } + }, + "additionalProperties": false, + "default": {}, + "description": "Linked Issue Assessment | Checks if the pull request addresses the linked issues. Generate an assessment of how well the changes address the linked issues." + } + }, + "additionalProperties": false, + "default": {} + }, "tools": { "type": "object", "properties": { @@ -412,7 +487,7 @@ }, "additionalProperties": false, "default": {}, - "description": "Enable ast-grep | ast-grep is a code analysis tool that helps you to find patterns in your codebase using abstract syntax trees patterns. | v0.38.1" + "description": "Enable ast-grep | ast-grep is a code analysis tool that helps you to find patterns in your codebase using abstract syntax trees patterns. | v0.38.6" }, "shellcheck": { "type": "object", @@ -433,7 +508,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable Ruff | Ruff is a Python linter and code formatter. | Enable Ruff integration. 
| v0.11.9" + "description": "Enable Ruff | Ruff is a Python linter and code formatter. | Enable Ruff integration. | v0.12.2" } }, "additionalProperties": false, @@ -578,7 +653,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable PHPStan | PHPStan requires [config file](https://phpstan.org/config-reference#config-file) in your repository root. Please ensure that this file contains the `paths:` parameter. | v2.1.15" + "description": "Enable PHPStan | PHPStan requires [config file](https://phpstan.org/config-reference#config-file) in your repository root. Please ensure that this file contains the `paths:` parameter. | v2.1.17" }, "level": { "type": "string", @@ -666,7 +741,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable Gitleaks | Gitleaks is a secret scanner. | Enable Gitleaks integration. | v8.26.0" + "description": "Enable Gitleaks | Gitleaks is a secret scanner. | Enable Gitleaks integration. | v8.27.2" } }, "additionalProperties": false, @@ -735,7 +810,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable RuboCop | RuboCop is a Ruby static code analyzer (a.k.a. linter ) and code formatter. | v1.75.5" + "description": "Enable RuboCop | RuboCop is a Ruby static code analyzer (a.k.a. linter ) and code formatter. | v1.76.1" } }, "additionalProperties": false, @@ -748,7 +823,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable Buf | Buf offers linting for Protobuf files. | v1.54.0" + "description": "Enable Buf | Buf offers linting for Protobuf files. | v1.55.1" } }, "additionalProperties": false, @@ -761,7 +836,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable Regal | Regal is a linter and language server for Rego. | v0.33.1" + "description": "Enable Regal | Regal is a linter and language server for Rego. 
| v0.35.1" } }, "additionalProperties": false, @@ -787,7 +862,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable PMD | PMD is an extensible multilanguage static code analyzer. It’s mainly concerned with Java. | v7.13.0" + "description": "Enable PMD | PMD is an extensible multilanguage static code analyzer. It’s mainly concerned with Java. | v7.15.0" }, "config_file": { "type": "string", @@ -834,7 +909,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable CircleCI | CircleCI tool is a static checker for CircleCI config files. | v0.1.31687" + "description": "Enable CircleCI | CircleCI tool is a static checker for CircleCI config files. | v0.1.32638" } }, "additionalProperties": false, @@ -860,7 +935,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable SQLFluff | SQLFluff is an open source, dialect-flexible and configurable SQL linter. | v3.4.0" + "description": "Enable SQLFluff | SQLFluff is an open source, dialect-flexible and configurable SQL linter. | v3.4.1" } }, "additionalProperties": false, @@ -873,7 +948,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable Prisma Schema linting | Prisma Schema linting helps maintain consistent and error-free schema files | v0.10.1" + "description": "Enable Prisma Schema linting | Prisma Schema linting helps maintain consistent and error-free schema files | v0.10.2" } }, "additionalProperties": false, @@ -899,12 +974,12 @@ "enabled": { "type": "boolean", "default": true, - "description": "Enable OXC | OXC is a JavaScript/TypeScript linter written in Rust. | v0.16.10" + "description": "Enable Oxlint | Oxlint is a JavaScript/TypeScript linter for OXC written in Rust. | v0.16.10" } }, "additionalProperties": false, "default": {}, - "description": "OXC is a JavaScript/TypeScript linter written in Rust." + "description": "Oxlint is a JavaScript/TypeScript linter for OXC written in Rust." 
}, "shopifyThemeCheck": { "type": "object",