diff --git a/config/_default/menus/menus.en.yaml b/config/_default/menus/menus.en.yaml index 98e8efdb35645..9c7b4c6160e35 100644 --- a/config/_default/menus/menus.en.yaml +++ b/config/_default/menus/menus.en.yaml @@ -2375,212 +2375,239 @@ main: identifier: ci parent: software_delivery_heading weight: 150000 - - name: Test Visibility - url: continuous_integration/tests/ - parent: ci - identifier: ci_tests - weight: 1 - - name: Setup - url: continuous_integration/tests/setup/ - parent: ci_tests - identifier: ci_tests_setup - weight: 101 - - name: .NET - url: continuous_integration/tests/setup/dotnet/ - parent: ci_tests_setup - identifier: ci_tests_setup_dotnet - weight: 101 - - name: Java - url: continuous_integration/tests/setup/java/ - parent: ci_tests_setup - identifier: ci_tests_setup_java - weight: 102 - - name: JavaScript and TypeScript - url: continuous_integration/tests/setup/javascript/ - parent: ci_tests_setup - identifier: ci_tests_setup_javascript - weight: 103 - - name: Python - url: continuous_integration/tests/setup/python/ - parent: ci_tests_setup - identifier: ci_tests_setup_python - weight: 104 - - name: Ruby - url: continuous_integration/tests/setup/ruby/ - parent: ci_tests_setup - identifier: ci_tests_setup_ruby - weight: 105 - - name: Swift - url: continuous_integration/tests/setup/swift/ - parent: ci_tests_setup - identifier: ci_tests_setup_swift - weight: 106 - - name: JUnit Report Uploads - url: continuous_integration/tests/setup/junit_xml/ - parent: ci_tests_setup - identifier: ci_tests_setup_junit_xml - weight: 107 - - name: Tests in Containers - url: continuous_integration/tests/containers/ - parent: ci_tests - identifier: ci_tests_containers - weight: 103 - - name: Developer Workflows - url: continuous_integration/tests/developer_workflows - parent: ci_tests - identifier: ci_tests_developer_workflows - weight: 104 - - name: Code Coverage - url: continuous_integration/tests/code_coverage - parent: ci_tests - identifier: 
ci_tests_code_coverage - weight: 105 - - name: Instrument Browser Tests with RUM - url: continuous_integration/tests/browser_tests - parent: ci_tests - identifier: ci_tests_browser_tests - weight: 106 - - name: Instrument Swift Tests with RUM - url: continuous_integration/tests/swift_tests - parent: ci_tests - identifier: ci_tests_swift_tests - weight: 107 - - name: Intelligent Test Runner - url: continuous_integration/intelligent_test_runner/ - parent: ci - identifier: ci_intelligent_test_runner - weight: 2 - - name: Setup - url: continuous_integration/intelligent_test_runner/setup/ - parent: ci_intelligent_test_runner - identifier: ci_intelligent_test_runner_setup - weight: 201 - - name: .NET - url: continuous_integration/intelligent_test_runner/setup/dotnet/ - parent: ci_intelligent_test_runner_setup - identifier: ci_itr_dotnet - weight: 201 - - name: Java - url: continuous_integration/intelligent_test_runner/setup/java/ - parent: ci_intelligent_test_runner_setup - identifier: ci_itr_java - weight: 202 - - name: JavaScript and TypeScript - url: continuous_integration/intelligent_test_runner/setup/javascript/ - parent: ci_intelligent_test_runner_setup - identifier: ci_itr_javascript - weight: 203 - - name: Python - url: continuous_integration/intelligent_test_runner/setup/python/ - parent: ci_intelligent_test_runner_setup - identifier: ci_itr_python - weight: 204 - - name: Swift - url: continuous_integration/intelligent_test_runner/setup/swift/ - parent: ci_intelligent_test_runner_setup - identifier: ci_itr_swift - weight: 205 - name: Pipeline Visibility url: continuous_integration/pipelines/ parent: ci identifier: pipeline_visibility - weight: 3 + weight: 1 - name: AWS CodePipeline url: continuous_integration/pipelines/awscodepipeline/ parent: pipeline_visibility identifier: ci_awscodepipeline - weight: 301 + weight: 101 - name: Azure url: continuous_integration/pipelines/azure/ parent: pipeline_visibility identifier: ci_azure - weight: 302 + weight: 102 - name: 
Buildkite url: continuous_integration/pipelines/buildkite/ parent: pipeline_visibility identifier: ci_buildkite - weight: 303 + weight: 103 - name: CircleCI url: continuous_integration/pipelines/circleci/ parent: pipeline_visibility identifier: ci_circleci - weight: 304 + weight: 104 - name: Codefresh url: continuous_integration/pipelines/codefresh/ parent: pipeline_visibility identifier: ci_codefresh - weight: 305 + weight: 105 - name: GitHub Actions url: continuous_integration/pipelines/github/ parent: pipeline_visibility identifier: ci_github - weight: 306 + weight: 106 - name: GitLab url: continuous_integration/pipelines/gitlab/ parent: pipeline_visibility identifier: ci_gitlab - weight: 307 + weight: 107 - name: Jenkins url: continuous_integration/pipelines/jenkins/ parent: pipeline_visibility identifier: ci_jenkins - weight: 308 + weight: 108 - name: TeamCity url: continuous_integration/pipelines/teamcity/ parent: pipeline_visibility identifier: ci_teamcity - weight: 309 + weight: 109 - name: Custom Commands url: continuous_integration/pipelines/custom_commands/ parent: pipeline_visibility identifier: ci_custom_commands - weight: 310 + weight: 110 - name: Custom Tags and Metrics url: continuous_integration/pipelines/custom_tags_and_metrics/ parent: pipeline_visibility identifier: ci_custom_tags_and_metrics - weight: 311 + weight: 111 - name: Search and Manage url: continuous_integration/search/ parent: ci identifier: ci_search - weight: 4 + weight: 2 - name: Explorer - url: continuous_integration/explorer/ + url: continuous_integration/explorer parent: ci identifier: ci_explorer - weight: 5 + weight: 3 - name: Search Syntax url: continuous_integration/explorer/search_syntax/ parent: ci_explorer identifier: ci_explorer_search_syntax - weight: 501 + weight: 301 - name: Facets url: continuous_integration/explorer/facets/ parent: ci_explorer identifier: ci_explorer_facets - weight: 502 + weight: 302 - name: Saved Views url: 
continuous_integration/explorer/saved_views/ parent: ci_explorer identifier: ci_explorer_saved_views - weight: 502 + weight: 302 - name: Guides url: continuous_integration/guides/ parent: ci identifier: ci_guides - weight: 6 + weight: 4 - name: Troubleshooting url: continuous_integration/troubleshooting/ parent: ci identifier: ci_troubleshooting + weight: 5 + - name: Test Visibility + url: tests/ + pre: ci + parent: software_delivery_heading + identifier: tests + weight: 160000 + - name: Setup + url: tests/setup/ + parent: tests + identifier: tests_setup + weight: 1 + - name: .NET + url: tests/setup/dotnet/ + parent: tests_setup + identifier: tests_setup_dotnet + weight: 101 + - name: Java + url: tests/setup/java/ + parent: tests_setup + identifier: tests_setup_java + weight: 102 + - name: JavaScript and TypeScript + url: tests/setup/javascript/ + parent: tests_setup + identifier: tests_setup_javascript + weight: 103 + - name: Python + url: tests/setup/python/ + parent: tests_setup + identifier: tests_setup_python + weight: 104 + - name: Ruby + url: tests/setup/ruby/ + parent: tests_setup + identifier: tests_setup_ruby + weight: 105 + - name: Swift + url: tests/setup/swift/ + parent: tests_setup + identifier: tests_setup_swift + weight: 106 + - name: JUnit Report Uploads + url: tests/setup/junit_xml/ + parent: tests_setup + identifier: tests_setup_junit_xml + weight: 107 + - name: Tests in Containers + url: tests/containers/ + parent: tests + identifier: tests_containers + weight: 2 + - name: Search and Manage + url: tests/search/ + parent: tests + identifier: tests_search + weight: 3 + - name: Explorer + url: continuous_integration/explorer + parent: tests + identifier: tests_explorer + weight: 4 + - name: Developer Workflows + url: tests/developer_workflows + parent: tests + identifier: tests_developer_workflows + weight: 5 + - name: Code Coverage + url: tests/code_coverage + parent: tests + identifier: tests_code_coverage + weight: 6 + - name: Instrument Browser 
Tests with RUM + url: tests/browser_tests + parent: tests + identifier: tests_browser_tests weight: 7 + - name: Instrument Swift Tests with RUM + url: tests/swift_tests + parent: tests + identifier: tests_swift_tests + weight: 8 + - name: Guides + url: tests/guides/ + parent: tests + identifier: tests_guides + weight: 9 + - name: Troubleshooting + url: tests/troubleshooting/ + parent: tests + identifier: tests_troubleshooting + weight: 10 + - name: Intelligent Test Runner + url: intelligent_test_runner/ + pre: ci + parent: software_delivery_heading + identifier: intelligent_test_runner + weight: 170000 + - name: Setup + url: intelligent_test_runner/setup/ + parent: intelligent_test_runner + identifier: intelligent_test_runner_setup + weight: 1 + - name: .NET + url: intelligent_test_runner/setup/dotnet/ + parent: intelligent_test_runner_setup + identifier: intelligent_test_runner_setup_dotnet + weight: 101 + - name: Java + url: intelligent_test_runner/setup/java/ + parent: intelligent_test_runner_setup + identifier: intelligent_test_runner_setup_java + weight: 102 + - name: JavaScript and TypeScript + url: intelligent_test_runner/setup/javascript/ + parent: intelligent_test_runner_setup + identifier: intelligent_test_runner_setup_javascript + weight: 103 + - name: Python + url: intelligent_test_runner/setup/python/ + parent: intelligent_test_runner_setup + identifier: intelligent_test_runner_setup_python + weight: 104 + - name: Swift + url: intelligent_test_runner/setup/swift/ + parent: intelligent_test_runner_setup + identifier: intelligent_test_runner_setup_swift + weight: 105 + - name: Troubleshooting + url: intelligent_test_runner/troubleshooting/ + parent: intelligent_test_runner + identifier: intelligent_test_runner_troubleshooting + weight: 2 - name: CD Visibility url: continuous_delivery/ pre: ci identifier: cd parent: software_delivery_heading - weight: 160000 + weight: 180000 - name: Deployment Visibility url: continuous_delivery/deployments identifier: 
cd_deployments @@ -2621,13 +2648,13 @@ main: pre: ci parent: software_delivery_heading identifier: dora_metrics - weight: 170000 + weight: 190000 - name: Static Analysis url: static_analysis/ pre: ci parent: software_delivery_heading identifier: static_analysis - weight: 180000 + weight: 200000 - name: Rules url: static_analysis/rules parent: static_analysis @@ -2648,7 +2675,7 @@ main: pre: ci parent: software_delivery_heading identifier: quality_gates - weight: 190000 + weight: 210000 - name: Database Monitoring url: database_monitoring/ pre: database-2 diff --git a/content/en/continuous_integration/guides/_index.md b/content/en/continuous_integration/guides/_index.md index 9cc7f12f151b0..d28751f7775bd 100644 --- a/content/en/continuous_integration/guides/_index.md +++ b/content/en/continuous_integration/guides/_index.md @@ -10,11 +10,6 @@ cascade: subcategory: CI Visibility Guides --- -{{< whatsnext desc="Test Visibility Guides:" >}} - {{< nextlink href="/continuous_integration/guides/flaky_test_management" >}}Flaky Test Management{{< /nextlink >}} - {{< nextlink href="/continuous_integration/guides/add_custom_metrics" >}}Add Custom Metrics to Your Tests{{< /nextlink >}} -{{< /whatsnext >}} - {{< whatsnext desc="Pipeline Visibility Guides:" >}} {{< nextlink href="/continuous_integration/guides/ingestion_control" >}}Creating Exclusion Filters for Ingestion Control{{< /nextlink >}} {{< nextlink href="/continuous_integration/guides/pipeline_data_model" >}}Understanding the Pipeline Data Model and Execution Types{{< /nextlink >}} diff --git a/content/en/continuous_integration/search/_index.md b/content/en/continuous_integration/search/_index.md index f8c587cef7541..fe7f442a71454 100644 --- a/content/en/continuous_integration/search/_index.md +++ b/content/en/continuous_integration/search/_index.md @@ -1,18 +1,10 @@ --- -title: Search and Manage CI Pipelines and Tests -description: Learn how to search for your CI pipelines and tests. 
-aliases: -- /continuous_integration/guides/find_flaky_tests/ -algolia: - rank: 70 - tags: ['flaky test', 'flaky tests', 'test regression', 'test regressions', 'test service', 'test services'] +title: Search and Manage CI Pipelines +description: Learn how to search for your CI pipelines. further_reading: - link: "/continuous_integration/explorer" tag: "Documentation" - text: "Search and filter test runs or pipeline executions" -- link: "/continuous_integration/guides/flaky_test_management" - tag: "Documentation" - text: "Learn how to manage flaky tests" + text: "Search and filter pipeline executions" --- {{< site-region region="gov" >}} @@ -21,10 +13,7 @@ further_reading: ## Overview -{{< tabs >}} -{{% tab "Pipelines" %}} - -The [Pipelines page][101] is useful for developers who want to keep an eye on the build pipeline for their service. +The [Pipelines page][1] is useful for developers who want to keep an eye on the build pipeline for their service. {{< img src="/continuous_integration/pipelines.png" text="CI Pipelines page" style="width:100%" >}} @@ -42,13 +31,13 @@ You can access high-level accumulation and trends, including: ## Search for pipelines -To see your pipelines, navigate to [**CI** > **Pipelines**][101]. +To see your pipelines, navigate to [**CI** > **Pipelines**][1]. -The [Pipelines page][101] shows aggregate stats for the default branch of each pipeline over the selected time frame, as well as the status of the latest pipeline execution. Use this page to see all your pipelines and get a quick view of their health. Only pipelines with Git information associated to the default branch (usually named `main` or `prod`), as well as pipelines without any Git information, are displayed on this page. +The [Pipelines page][1] shows aggregate stats for the default branch of each pipeline over the selected time frame, as well as the status of the latest pipeline execution. Use this page to see all your pipelines and get a quick view of their health. 
Only pipelines with Git information associated to the default branch (usually named `main` or `prod`), as well as pipelines without any Git information, are displayed on this page. The metrics shown include build frequency, failure rate, median duration, and change in median duration on both an absolute and relative basis. This information reveals which pipelines are high-usage and potentially high-resource consumers, or are experiencing regressions. The last build result, duration, and last runtime shows you the effect of the last commit. -You can filter the page by pipeline name to see the pipelines you're most concerned with. Click on a pipeline that is slow or failing to dig into details that show what commit might have introduced the performance regression or build error. If you are using [Datadog Teams][106], you can filter for specific pipelines associated to your team using [custom tags][107] that match team handles. +You can filter the page by pipeline name to see the pipelines you're most concerned with. Click on a pipeline that is slow or failing to dig into details that show what commit might have introduced the performance regression or build error. If you are using [Datadog Teams][6], you can filter for specific pipelines associated to your team using [custom tags][7] that match team handles. ## Pipeline details and executions @@ -74,9 +63,9 @@ If job log collection is supported and enabled for the CI provider, related log Job log collection is supported for the following providers: -- [GitHub Actions][103] -- [GitLab][104] -- [Jenkins][105] +- [GitHub Actions][3] +- [GitLab][4] +- [Jenkins][5] #### AI-generated log summaries @@ -84,88 +73,13 @@ Job log collection is supported for the following providers: Pipeline Visibility provides AI-generated explanations for pipeline errors based on your CI job logs. These explanations can be found on the **Failed Jobs** tab for each pipeline execution. 
You can use these summaries to determine whether an error in CI is associated with developer-written code or the CI pipeline itself, as well as troubleshoot execution failures. -[101]: https://app.datadoghq.com/ci/pipelines -[103]: /continuous_integration/pipelines/github/#enable-log-collection -[104]: /continuous_integration/pipelines/gitlab/#enable-job-log-collection-beta -[105]: /continuous_integration/pipelines/jenkins#enable-job-log-collection -[106]: /account_management/teams/ -[107]: /continuous_integration/pipelines/custom_tags_and_metrics/?tab=linux - -{{% /tab %}} -{{% tab "Tests" %}} - -The [Tests page][101] is useful for developers who want to keep an eye on their test results. - -{{< img src="/continuous_integration/tests.png" text="CI Tests page" style="width:100%" >}} - -You can access low-level and immediate insights: - -- See what tests are failing and why. -- See your last commit's test results. -- View the wall time of your tests in your feature branch and compare it to the default branch, to identify if you're about to introduce a performance regression. -- Find out if your commit introduces a new [flaky test][105] that wasn't flaky before, indicating that your code change is what's making it flaky. This gives you the opportunity to fix the problem before proceeding rather than contributing to the number of flaky tests in your CI. - -You can also access high-level accumulation and trends: - -- See the effects that changed code, added tests, and increased complexity have on your test suite performance over time. -- See which tests have become slower over time and identify the commit that introduced the regression. -- Take advantage of Datadog's automatic test flakiness detection and tracking, which shows you which tests are becoming more or less unreliable over time. 
- -## Search for tests - -To see your tests, navigate to [**CI** > **Tests**][101] and select between the [**Branches**](#branches-view) or [**Default Branches** view](#default-branches-view). - -### Branches view - -The [Branches][102] view of the Tests page lists all branches from all [test services][103] that have reported test results. This tab is useful for individual developers to quickly see the status of tests that run on their code branches and troubleshoot test failures. - -In this page, you can filter the list by name, test service, or commit SHA, or to show only your branches (branches that contain at least one commit authored by you), enable the **My branches** toggle and add the email addresses you use in your Git configuration. - -#### Test results - -For each branch, you can see the test service, the number of failed, passed, and skipped tests, test regressions, wall time, the percentage of change compared to the default branch, when the commit was last updated, and the avatar of the author of the commit. - -Click on a branch to explore the test details page, which includes information about the branch's latest commits, flaky tests, test performance, common error types, and all test runs. - -{{< img src="continuous_integration/test_details.png" alt="Test Details page for a single branch" style="width:100%;">}} - -#### Test suite performance - -There is also information about the [wall time][104] of the most recent test suite run, and a comparison to the average wall time of the default branch. The comparison of your branch's wall time to the default branch's wall time can help you determine if your commit is introducing performance [regressions][106] to your test suite. - -Hovering over the commit author avatar shows detailed information about the latest commit. - -#### Test regressions - -[Test regressions][106] are evaluated per commit in an effort to tie performance regressions to specific code changes. 
- -#### Investigate for more details - -Click on the row to see test suite run details such as test results for the last commit on this branch (or you can switch branches), failing tests and the most common errors, slow tests, flaky tests, and a complete list of test runs over the time frame selected. You can filter this list of test runs by facet to get to the information you want to see most. - -Click into one of the test runs to see the test trace as a flame graph or a span list. The _Runs (n)_ list on the left lets you quickly access traces for each retry of the test for the same commit. - -#### Explore connections to services, resources, logs, and network events - -Click the CI provider link to examine the Resource, Service, or Analytics page for the test. You can also find complete tags information and links to related log events and network monitoring events. - -### Default Branches view - -The [Default Branches][107] view of the Tests page shows aggregated health metrics for the _default_ branch of each test service. This view is useful for teams to understand the overall health of the service over time. - -The Default Branches view shows similar information to the Branches view, but applied to the default branch. It compares the current wall time with the average default branch wall time to give you an indication of how your test suite performance is trending over time. 
- -[101]: https://app.datadoghq.com/ci/test-services -[102]: https://app.datadoghq.com/ci/test-services?view=branches -[103]: /glossary/#test-service -[104]: /glossary/#wall-time -[105]: /glossary/#flaky-test -[106]: /glossary/#test-regression -[107]: https://app.datadoghq.com/ci/test-services?view=default-branches - -{{% /tab %}} -{{< /tabs >}} - ## Further reading {{< partial name="whats-next/whats-next.html" >}} + +[1]: https://app.datadoghq.com/ci/pipelines +[3]: /continuous_integration/pipelines/github/#enable-log-collection +[4]: /continuous_integration/pipelines/gitlab/#enable-job-log-collection-beta +[5]: /continuous_integration/pipelines/jenkins#enable-job-log-collection +[6]: /account_management/teams/ +[7]: /continuous_integration/pipelines/custom_tags_and_metrics/?tab=linux \ No newline at end of file diff --git a/content/en/continuous_integration/troubleshooting.md b/content/en/continuous_integration/troubleshooting.md index fb3d9280f8abf..62c37fef52b9b 100644 --- a/content/en/continuous_integration/troubleshooting.md +++ b/content/en/continuous_integration/troubleshooting.md @@ -21,195 +21,24 @@ further_reading: This page provides information to help you troubleshot issues with CI Visibility. If you need additional help, contact [Datadog Support][2]. -## CI tests - -### Your tests are instrumented, but Datadog isn't showing any data - -1. Go to the [**Tests**][3] page for the language you're instrumenting and check that the testing framework you are using is supported in the **Compatibility** section. -2. Check if you see any test results in the [**Test Runs**][4] section. If you do see results there, but not in the [**Tests**][5] section, Git information is missing. See [Data appears in Test Runs but not Tests](#data-appears-in-test-runs-but-not-tests) to troubleshoot it. -3. 
If you are reporting the data through the Datadog Agent, make sure it is running on the host where tests are run (accessible at `localhost:8126`), or if accessible on another hostname or port, make sure you run your tests with the appropriate Agent hostname set in the `DD_AGENT_HOST` and the appropriate port in `DD_TRACE_AGENT_PORT` environment variables. You can activate [debug mode][6] in the tracer to check if it's able to connect to the Agent. -4. If you still don't see any results, [contact Support][2] for troubleshooting help. - -### You are uploading JUnit test reports with `datadog-ci` but some or all tests are missing -If you are uploading JUnit test report files with `datadog-ci` CLI and you do not see the tests, it is likely the tests are being discarded because the report is considered incorrect. - -The following aspects make a JUnit test report incorrect: -* A timestamp of the reported tests that is older than **71 hours** before the moment the report is uploaded. -* A testsuite without a name. - -### Data appears in test runs but not tests - -If you can see test results data in the **Test Runs** tab, but not the **Tests** tab, Git metadata (repository, commit, or branch) is probably missing. To confirm this is the case, open a test execution in the [**Test Runs**][4] section, and check that there is no `git.repository_url`, `git.commit.sha`, or `git.branch`. If these tags are not populated, nothing shows in the [**Tests**][5] section. - -1. Tracers first use the environment variables, if any, set by the CI provider to collect Git information. See [Running tests inside a container][7] for a list of environment variables that the tracer attempts to read for each supported CI provider. At a minimum, this populates the repository, commit hash, and branch information. -2. Next, tracers fetch Git metadata using the local `.git` folder, if present, by executing `git` commands. 
This populates all Git metadata fields, including commit message, author, and committer information. Ensure the `.git` folder is present and the `git` binary is installed and in `$PATH`. This information is used to populate attributes not detected in the previous step. -3. You can also provide Git information manually using environment variables, which override information detected by any of the previous steps. - - The supported environment variables for providing Git information are: - - `DD_GIT_REPOSITORY_URL` **(required)** - : URL of the repository where the code is stored. Both HTTP and SSH URLs are supported.
- **Example**: `git@github.com:MyCompany/MyApp.git`, `https://github.com/MyCompany/MyApp.git` - - `DD_GIT_COMMIT_SHA` **(required)** - : Full (40-character long SHA1) commit hash.
- **Example**: `a18ebf361cc831f5535e58ec4fae04ffd98d8152` - - `DD_GIT_BRANCH` - : Git branch being tested. Leave empty if providing tag information instead.
- **Example**: `develop` - - `DD_GIT_TAG` - : Git tag being tested (if applicable). Leave empty if providing branch information instead.
- **Example**: `1.0.1` - - `DD_GIT_COMMIT_MESSAGE` - : Commit message.
- **Example**: `Set release number` - - `DD_GIT_COMMIT_AUTHOR_NAME` - : Commit author name.
- **Example**: `John Smith` - - `DD_GIT_COMMIT_AUTHOR_EMAIL` - : Commit author email.
- **Example**: `john@example.com` - - `DD_GIT_COMMIT_AUTHOR_DATE` - : Commit author date in ISO 8601 format.
- **Example**: `2021-03-12T16:00:28Z` - - `DD_GIT_COMMIT_COMMITTER_NAME` - : Commit committer name.
- **Example**: `Jane Smith` - - `DD_GIT_COMMIT_COMMITTER_EMAIL` - : Commit committer email.
- **Example**: `jane@example.com` - - `DD_GIT_COMMIT_COMMITTER_DATE` - : Commit committer date in ISO 8601 format.
- **Example**: `2021-03-12T16:00:28Z` - -4. If no CI provider environment variables are found, tests results are sent with no Git metadata. - -### The tests wall time is empty - -If you cannot see the tests wall time it is likely that the CI provider metadata is missing. To confirm this is the case, open a test execution in the [**Test Runs**][4] section, and check if the `ci.pipeline.id`, `ci.pipeline.name`, `ci.pipeline.number`, or `ci.job.url` tags are missing. If these tags are not populated, then nothing shows in the wall time column. - -1. Tracers use the environment variables set by the CI provider to collect this information. See [Running tests inside a container][7] for a list of environment variables that the tracer attempts to read for each supported CI provider. Make sure that the environment variables have the expected values set. -2. Check that you are running your tests in a supported CI provider. For a list of supported CI providers, see [Running tests inside a container][7]. Only these CI providers can extract the information to enrich the test metadata with CI information. -3. If you still don't see the wall time, contact [Datadog support][2] for help. - -### The tests wall time is not what is expected - -#### How wall time is calculated -The wall time is defined as the time difference between the start time of the first test and the end time of the last test for the given pipeline. - -This is done using the following algorithm: - -1. Compute a hash based on CI information to group the tests. - 1. If the tests include `ci.job.url`, use this tag to calculate the hash. - 2. If the tests don't include `ci.job.url`, use `ci.pipeline.id` + `ci.pipeline.name` + `ci.pipeline.number` to calculate the hash. -2. The calculated wall time is associated to a given hash. **Note**: If there are multiple jobs that execute tests, the wall time is calculated for each job, and the maximum from all calculated wall times is shown. 
- -#### Possible issues with wall time calculation -If you're using a library for testing time-dependent code, like [timecop][8] for Ruby or [FreezeGun][9] for Python, it is possible that test timestamps are wrong, and therefore calculated wall times. If this is the case, make sure that modifications to time are rolled back before finishing your tests. - -### The test status numbers are not what is expected - -The test status numbers are calculated based on the unique tests that were collected. The uniqueness of a test is defined not only by its suite and name, but by its test parameters and test configurations as well. - -#### The numbers are lower than expected - -If the numbers are lower than expected, it is likely that either the library or the tool you are using to collect test data cannot collect test parameters and/or some test configurations. - -1. If you are uploading JUnit test report files: - 1. If you are running the same tests in different environment configurations, [make sure you are setting those configuration tags during the upload][10]. - 2. If you are running parameterized tests, it's very likely that the JUnit report does not have that information. [Try using a native library to report test data][3]. -2. If you still don't see the expected results, [contact Datadog support][2] for troubleshooting help. 
- -#### The passed/failed/skipped numbers are different than expected - -If the same test is collected several times for the same commit but with different status, the aggregated result follows the algorithm in the table below: - -| **Test Status - First Try** | **Test Status - Retry #1** | **Result** | -|-----------------------------|----------------------------|------------| -| `Passed` | `Passed` | `Passed` | -| `Passed` | `Failed` | `Passed` | -| `Passed` | `Skipped` | `Passed` | -| `Failed` | `Passed` | `Passed` | -| `Failed` | `Failed` | `Failed` | -| `Failed` | `Skipped` | `Failed` | -| `Skipped` | `Passed` | `Passed` | -| `Skipped` | `Failed` | `Failed` | -| `Skipped` | `Skipped` | `Skipped` | - -### The default branch is not correct - -#### How it impacts the product - -The default branch is used to power some features of the products, namely: - -- Default branches list on the Tests page: This list only displays default branches. Setting the wrong default branch can result in missing or incorrect data in the default branches list. - -- Wall time comparison for non-default branches: On the Tests page, in the Branches view, the **VS Default** column is calculated by comparing wall time for the current branch against wall time for the default branch. - -- New flaky tests: Tests that are not currently classified as flaky in the default branch. If the default branch is not properly set, this could lead to a wrong number of detected new flaky tests. - -- Pipelines list: The pipelines list only displays default branches. Setting the wrong default branch can result in missing or incorrect data in the pipelines list. - -#### How to fix the default branch - -If you have admin access, you can update it from the [Repository Settings Page][11]. - -## CI pipelines - -### Your Jenkins instance is instrumented, but Datadog isn't showing any data +## Your Jenkins instance is instrumented, but Datadog isn't showing any data 1. 
Make sure that at least one pipeline has finished executing. Pipeline execution information is only sent after the pipeline has finished. 2. Make sure the Datadog Agent host is properly configured and is reachable by the Datadog Plugin. You can test connectivity by clicking on the **Check connectivity with the Datadog Agent** button on the Jenkins plugin configuration UI. 3. Check for any errors in the Jenkins logs. You can enable debug-level logs for the Datadog plugin by [creating a `logging.properties` file][1] and adding the line: `org.datadog.level = ALL`. -### Pipeline not found +## Pipeline not found A "Pipeline not found" message is shown when you click on incomplete data coming from an in-progress pipeline. Data is received progressively for stages, jobs, or custom commands. Wait until the pipeline has finished and try again. -### Missing pipelines on the Pipelines page +## Missing pipelines on the Pipelines page The pipeline page only displays pipelines with no Git information, or pipelines with Git information which belong to the default branch of the Git repository. -### Missing stages or jobs in summary tables +## Missing stages or jobs in summary tables Missing stages or jobs in the _Pipeline Details_ page might be due to a wrong configuration. Make sure that the pipeline name stored in the stage or job executions matches the **same** name of their parent pipeline. If you are using custom pipelines, refer to the [public API endpoint specification][15]. -## Intelligent Test Runner - -### Intelligent Test Runner is not working - -[Intelligent Test Runner][12] works by analyzing your commit history along with code coverage information about past test runs to determine which tests need to be run and which ones can be safely skipped. A minimum amount of information needs to exist in order for the Intelligent Test Runner to work correctly: - -- Your repository needs to have a commit history of at least two commits in the past month. 
-- You need to have collected test code coverage in past commits, which happens on test runs where Intelligent Test Runner was enabled. -- Your git clone must contain commit and tree history. Intelligent Test Runner tries to unshallow git clones that do not contain history (`git clone --depth=1`), but that might not work on older versions of git. Automatic unshallowing might require additional set up in some CI providers (Harness CI, for example, requires [extra configuration][13] to make sure your pipeline can execute git commands). If your CI job is using shallow git clones, you can change it to use partial git clones by using the following command: `git clone --filter=blob:none`. - -Due to these restrictions, the first time you enable Intelligent Test Runner, you cannot see any tests skipped and the test execution time may be slower than usual because the code coverage is collected automatically. - -Intelligent Test Runner only takes into account the commit history and test code coverage information for the past month. Additionally, it does not take into account code coverage information that is generated more than one week after a commit was made. - -There is a limitation when [synchronizing a fork through GitHub's UI][14] which causes all tests to be run for the generated synchronization commit. - -### Intelligent Test Runner incorrectly skipped a test - -Intelligent Test Runner performs test impact analysis based on code coverage to determine which tests are impacted by a given commit or set of commits. While this strategy works for the majority of tests, there are known scenarios where Intelligent Test Runner could skip a test that should have been run: - -- Changes in library dependencies. -- Changes in compiler options. -- Changes in external services. -- Changes to data files in data-driven tests. 
- -If you are authoring a commit that includes any of those cases, you can force-disable test skipping in Intelligent Test Runner by adding `ITR:NoSkip` (case insensitive) anywhere in your Git commit message. - ## Further reading {{< partial name="whats-next/whats-next.html" >}} diff --git a/content/en/integrations/guide/source-code-integration.md b/content/en/integrations/guide/source-code-integration.md index 0ae6400428115..85a4b6869d03e 100644 --- a/content/en/integrations/guide/source-code-integration.md +++ b/content/en/integrations/guide/source-code-integration.md @@ -15,7 +15,7 @@ further_reading: - link: "/serverless/aws_lambda/configuration/?tab=datadogcli#link-errors-to-your-source-code" tag: "Documentation" text: "Learn about Serverless Monitoring" -- link: "/continuous_integration/tests/developer_workflows/" +- link: "/tests/developer_workflows/" tag: "Documentation" text: "Learn about CI Visibility" - link: "/security/application_security/" @@ -363,7 +363,7 @@ Setting up the GitHub integration also allows you to see inline code snippets in [103]: /logs/error_tracking/backend/?tab=serilog#setup [104]: /integrations/guide/source-code-integration/?tab=continuousprofiler#links-to-git-providers [105]: /serverless/aws_lambda/configuration/?tab=datadogcli#link-errors-to-your-source-code -[106]: /continuous_integration/tests/developer_workflows/#open-tests-in-github-and-your-ide +[106]: /tests/developer_workflows/#open-tests-in-github-and-your-ide [107]: /security/application_security/ {{% /tab %}} @@ -463,7 +463,7 @@ You can see links from failed test runs to their source repository in **CI Visib For more information, see [Enhancing Developer Workflows with Datadog][102]. 
[101]: https://app.datadoghq.com/ci/test-runs -[102]: /continuous_integration/tests/developer_workflows#open-tests-in-github-and-your-ide +[102]: /tests/developer_workflows/#open-tests-in-github-and-your-ide {{% /tab %}} {{% tab "Application Security Monitoring" %}} diff --git a/content/en/continuous_integration/intelligent_test_runner/_index.md b/content/en/intelligent_test_runner/_index.md similarity index 99% rename from content/en/continuous_integration/intelligent_test_runner/_index.md rename to content/en/intelligent_test_runner/_index.md index 9face6f8b99dc..fd7767524f6b1 100644 --- a/content/en/continuous_integration/intelligent_test_runner/_index.md +++ b/content/en/intelligent_test_runner/_index.md @@ -1,6 +1,8 @@ --- title: Intelligent Test Runner kind: documentation +aliases: +- /continuous_integration/intelligent_test_runner/ further_reading: - link: "https://www.datadoghq.com/blog/streamline-ci-testing-with-datadog-intelligent-test-runner/" tag: "Blog" diff --git a/content/en/continuous_integration/intelligent_test_runner/setup/_index.md b/content/en/intelligent_test_runner/setup/_index.md similarity index 82% rename from content/en/continuous_integration/intelligent_test_runner/setup/_index.md rename to content/en/intelligent_test_runner/setup/_index.md index 29ea74017e567..be238e0c9fbd0 100644 --- a/content/en/continuous_integration/intelligent_test_runner/setup/_index.md +++ b/content/en/intelligent_test_runner/setup/_index.md @@ -1,6 +1,8 @@ --- title: Configure the Intelligent Test Runner kind: documentation +aliases: +- /continuous_integration/intelligent_test_runner/setup/ type: multi-code-lang --- diff --git a/content/en/continuous_integration/intelligent_test_runner/setup/dotnet.md b/content/en/intelligent_test_runner/setup/dotnet.md similarity index 98% rename from content/en/continuous_integration/intelligent_test_runner/setup/dotnet.md rename to content/en/intelligent_test_runner/setup/dotnet.md index 0e89b5450edea..580fedee6b21c 100644 
--- a/content/en/continuous_integration/intelligent_test_runner/setup/dotnet.md +++ b/content/en/intelligent_test_runner/setup/dotnet.md @@ -6,6 +6,7 @@ type: multi-code-lang code_lang_weight: 0 aliases: - continuous_integration/intelligent_test_runner/dotnet/ + - continuous_integration/intelligent_test_runner/setup/dotnet/ further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git a/content/en/continuous_integration/intelligent_test_runner/setup/java.md b/content/en/intelligent_test_runner/setup/java.md similarity index 99% rename from content/en/continuous_integration/intelligent_test_runner/setup/java.md rename to content/en/intelligent_test_runner/setup/java.md index 58c663b3ef5c9..163108bbec05c 100644 --- a/content/en/continuous_integration/intelligent_test_runner/setup/java.md +++ b/content/en/intelligent_test_runner/setup/java.md @@ -7,6 +7,8 @@ type: multi-code-lang code_lang_weight: 10 aliases: - continuous_integration/intelligent_test_runner/java/ + - continuous_integration/intelligent_test_runner/setup/java/ + further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git a/content/en/continuous_integration/intelligent_test_runner/setup/javascript.md b/content/en/intelligent_test_runner/setup/javascript.md similarity index 98% rename from content/en/continuous_integration/intelligent_test_runner/setup/javascript.md rename to content/en/intelligent_test_runner/setup/javascript.md index 5f362c65a294f..1a220088d56c7 100644 --- a/content/en/continuous_integration/intelligent_test_runner/setup/javascript.md +++ b/content/en/intelligent_test_runner/setup/javascript.md @@ -6,6 +6,7 @@ type: multi-code-lang code_lang_weight: 20 aliases: - continuous_integration/intelligent_test_runner/javascript/ + - continuous_integration/intelligent_test_runner/setup/javascript/ further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git 
a/content/en/continuous_integration/intelligent_test_runner/setup/python.md b/content/en/intelligent_test_runner/setup/python.md similarity index 98% rename from content/en/continuous_integration/intelligent_test_runner/setup/python.md rename to content/en/intelligent_test_runner/setup/python.md index b264211e64e0f..83c9faad733ec 100644 --- a/content/en/continuous_integration/intelligent_test_runner/setup/python.md +++ b/content/en/intelligent_test_runner/setup/python.md @@ -6,6 +6,7 @@ type: multi-code-lang code_lang_weight: 30 aliases: - continuous_integration/intelligent_test_runner/python/ + - continuous_integration/intelligent_test_runner/setup/python/ further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git a/content/en/continuous_integration/intelligent_test_runner/setup/swift.md b/content/en/intelligent_test_runner/setup/swift.md similarity index 96% rename from content/en/continuous_integration/intelligent_test_runner/setup/swift.md rename to content/en/intelligent_test_runner/setup/swift.md index b1388684eff80..1dbe5b11c5953 100644 --- a/content/en/continuous_integration/intelligent_test_runner/setup/swift.md +++ b/content/en/intelligent_test_runner/setup/swift.md @@ -7,6 +7,7 @@ type: multi-code-lang code_lang_weight: 40 aliases: - continuous_integration/intelligent_test_runner/swift/ + - continuous_integration/intelligent_test_runner/setup/swift/ further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git a/content/en/intelligent_test_runner/troubleshooting/_index.md b/content/en/intelligent_test_runner/troubleshooting/_index.md new file mode 100644 index 0000000000000..575933a0bd7e6 --- /dev/null +++ b/content/en/intelligent_test_runner/troubleshooting/_index.md @@ -0,0 +1,61 @@ +--- +title: Intelligent Test Runner Troubleshooting +kind: documentation +further_reading: +- link: "/intelligent_test_runner" + tag: "Documentation" + text: "Learn about the Intelligent Test Runner" +--- + +{{< 
site-region region="gov" >}} +
Intelligent Test Runner is not available in the selected site ({{< region-param key="dd_site_name" >}}) at this time.
+{{< /site-region >}} + +## Overview + +This page provides information to help you troubleshoot issues with Intelligent Test Runner. If you need additional help, contact [Datadog Support][2]. + +## Intelligent Test Runner is not working + +[Intelligent Test Runner][12] works by analyzing your commit history along with code coverage information about past test runs to determine which tests need to be run and which ones can be safely skipped. A minimum amount of information needs to exist in order for Intelligent Test Runner to work correctly: + +- Your repository needs to have a commit history of at least two commits in the past month. +- You need to have collected test code coverage in past commits, which happens on test runs where Intelligent Test Runner was enabled. +- Your git clone must contain commit and tree history. Intelligent Test Runner tries to unshallow git clones that do not contain history (`git clone --depth=1`), but that might not work on older versions of git. Automatic unshallowing might require additional setup in some CI providers (Harness CI, for example, requires [extra configuration][13] to make sure your pipeline can execute git commands). If your CI job is using shallow git clones, you can change it to use partial git clones by using the following command: `git clone --filter=blob:none`. + +Due to these restrictions, the first time you enable Intelligent Test Runner, you cannot see any tests skipped and the test execution time may be slower than usual because the code coverage is collected automatically. + +Intelligent Test Runner only takes into account the commit history and test code coverage information for the past month. Additionally, it does not take into account code coverage information that is generated more than one week after a commit was made. + +There is a limitation when [synchronizing a fork through GitHub's UI][14] which causes all tests to be run for the generated synchronization commit. 
+ +## Intelligent Test Runner incorrectly skipped a test + +Intelligent Test Runner performs test impact analysis based on code coverage to determine which tests are impacted by a given commit or set of commits. While this strategy works for the majority of tests, there are known scenarios where Intelligent Test Runner could skip a test that should have been run: + +- Changes in library dependencies. +- Changes in compiler options. +- Changes in external services. +- Changes to data files in data-driven tests. + +If you are authoring a commit that includes any of those cases, you can force-disable test skipping in Intelligent Test Runner by adding `ITR:NoSkip` (case insensitive) anywhere in your Git commit message. + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: https://www.jenkins.io/doc/book/system-administration/viewing-logs/ +[2]: /help/ +[3]: /continuous_integration/tests/ +[4]: https://app.datadoghq.com/ci/test-runs +[5]: https://app.datadoghq.com/ci/test-services +[6]: /tracing/troubleshooting/tracer_debug_logs +[7]: /continuous_integration/tests/containers/ +[8]: https://github.com/travisjeffery/timecop +[9]: https://github.com/spulec/freezegun +[10]: /continuous_integration/tests/junit_upload/?tabs=linux#collecting-environment-configuration-metadata +[11]: https://app.datadoghq.com/ci/settings/repository +[12]: /continuous_integration/intelligent_test_runner/ +[13]: https://developer.harness.io/kb/continuous-integration/articles/using_git_credentials_from_codebase_connector_in_ci_pipelines_run_step/ +[14]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork#syncing-a-fork-branch-from-the-web-ui +[15]: /api/latest/ci-visibility-pipelines/#send-pipeline-event diff --git a/content/en/continuous_integration/tests/_index.md b/content/en/tests/_index.md similarity index 83% rename from content/en/continuous_integration/tests/_index.md rename to content/en/tests/_index.md 
index cc31070efef57..c1cab620eaed0 100644 --- a/content/en/continuous_integration/tests/_index.md +++ b/content/en/tests/_index.md @@ -5,6 +5,7 @@ aliases: - /continuous_integration/explore_tests/ - /continuous_integration/guides/test_configurations/ - /continuous_integration/integrate_tests/ + - /continuous_integration/tests/ further_reading: - link: "/monitors/types/ci/" tag: "Documentation" @@ -38,17 +39,17 @@ cascade: ## Setup {{< whatsnext desc="Choose a language to set up Test Visibility in Datadog:" >}} - {{< nextlink href="continuous_integration/tests/setup/dotnet" >}}.NET{{< /nextlink >}} - {{< nextlink href="continuous_integration/tests/setup/java" >}}Java{{< /nextlink >}} - {{< nextlink href="continuous_integration/tests/setup/javascript" >}}JavaScript{{< /nextlink >}} - {{< nextlink href="continuous_integration/tests/setup/python" >}}Python{{< /nextlink >}} - {{< nextlink href="continuous_integration/tests/setup/ruby" >}}Ruby{{< /nextlink >}} - {{< nextlink href="continuous_integration/tests/setup/swift" >}}Swift{{< /nextlink >}} - {{< nextlink href="continuous_integration/tests/junit_upload" >}}Uploading JUnit test report files to Datadog{{< /nextlink >}} - {{< nextlink href="continuous_integration/tests/containers" >}}Tests running in containers{{< /nextlink >}} + {{< nextlink href="tests/setup/dotnet" >}}.NET{{< /nextlink >}} + {{< nextlink href="tests/setup/java" >}}Java{{< /nextlink >}} + {{< nextlink href="tests/setup/javascript" >}}JavaScript{{< /nextlink >}} + {{< nextlink href="tests/setup/python" >}}Python{{< /nextlink >}} + {{< nextlink href="tests/setup/ruby" >}}Ruby{{< /nextlink >}} + {{< nextlink href="tests/setup/swift" >}}Swift{{< /nextlink >}} + {{< nextlink href="tests/setup/junit_xml" >}}Uploading JUnit test report files to Datadog{{< /nextlink >}} + {{< nextlink href="tests/containers" >}}Tests running in containers{{< /nextlink >}} {{< /whatsnext >}} -In addition to tests, CI Visibility provides visibility over the whole testing 
phase of your project (except for Ruby). +In addition to tests, Test Visibility provides visibility over the whole testing phase of your project (except for Ruby). ### Supported features @@ -73,13 +74,13 @@ In addition to tests, CI Visibility provides visibility over the whole testing p Tests evaluate the behavior of code for a set of given conditions. Some of those conditions are related to the environment where the tests are run, such as the operating system or the runtime used. The same code executed under different sets of conditions can behave differently, so developers usually configure their tests to run in different sets of conditions and validate that the behavior is the expected for all of them. This specific set of conditions is called a *configuration*. -In CI Visibility, a test with multiple configurations is treated as multiple tests with a separate test for each configuration. In the case where one of the configurations fails but the others pass, only that specific test and configuration combination is marked as failed. +In Test Visibility, a test with multiple configurations is treated as multiple tests with a separate test for each configuration. In the case where one of the configurations fails but the others pass, only that specific test and configuration combination is marked as failed. For example, suppose you're testing a single commit and you have a Python test that runs against three different Python versions. If the test fails for one of those versions, that specific test is marked as failed, while the other versions are marked as passed. If you retry the tests against the same commit and now the test for all three Python versions pass, the test with the version that previously failed is now marked as both passed and flaky, while the other two versions remain passed, with no flakiness detected. 
### Test configuration attributes -When you run your tests with CI Visibility, the library detects and reports information about the environment where tests are run as test tags. For example, the operating system name, such as `Windows` or `Linux`, and the architecture of the platform, such as `arm64` or `x86_64`, are added as tags on each test. These values are shown in the commit and on branch overview pages when a test fails or is flaky for a specific configuration but not others. +When you run your tests with Test Visibility, the library detects and reports information about the environment where tests are run as test tags. For example, the operating system name, such as `Windows` or `Linux`, and the architecture of the platform, such as `arm64` or `x86_64`, are added as tags on each test. These values are shown in the commit and on branch overview pages when a test fails or is flaky for a specific configuration but not others. The following tags are automatically collected to identify test configurations, and some may only apply to specific platforms: @@ -101,7 +102,7 @@ The following tags are automatically collected to identify test configurations, ## Custom configurations -There are some configurations that cannot be directly identified and reported automatically because they can depend on environment variables, test run arguments, or other approaches that developers use. For those cases, you must provide the configuration details to the library so CI Visibility can properly identify them. +There are some configurations that cannot be directly identified and reported automatically because they can depend on environment variables, test run arguments, or other approaches that developers use. For those cases, you must provide the configuration details to the library so Test Visibility can properly identify them. Define these tags as part of the `DD_TAGS` environment variable using the `test.configuration` prefix. 
@@ -122,10 +123,10 @@ In order to filter using these configurations tags, [you must create facets for ### Integrations {{< whatsnext desc="Learn about the following integrations with Test Visibility:" >}} -{{< nextlink href="/continuous_integration/tests/developer_workflows" >}}Enhancing Developer Workflows with Datadog{{< /nextlink >}} -{{< nextlink href="/continuous_integration/tests/code_coverage" >}}Code Coverage{{< /nextlink >}} -{{< nextlink href="/continuous_integration/tests/browser_tests" >}}Instrument Cypress Browser Tests with Browser RUM{{< /nextlink >}} -{{< nextlink href="/continuous_integration/tests/swift_tests" >}}Instrument Swift Tests with Browser RUM{{< /nextlink >}} +{{< nextlink href="/tests/developer_workflows/" >}}Enhancing Developer Workflows with Datadog{{< /nextlink >}} +{{< nextlink href="/tests/code_coverage" >}}Code Coverage{{< /nextlink >}} +{{< nextlink href="/tests/browser_tests" >}}Instrument Cypress Browser Tests with Browser RUM{{< /nextlink >}} +{{< nextlink href="/tests/swift_tests" >}}Instrument Swift Tests with Browser RUM{{< /nextlink >}} {{< /whatsnext >}} If [Intelligent Test Runner][13] is enabled for .NET, JavaScript, or Swift, [code coverage information][12], including file names and line numbers covered by each test, are collected from your projects. 
@@ -146,5 +147,5 @@ When you evaluate failed or flaky tests, or the performance of a CI test on the [9]: https://app.datadoghq.com/notebook/list [10]: https://app.datadoghq.com/ci/test-runs [11]: /monitors/types/ci/ -[12]: /continuous_integration/guides/code_coverage/ -[13]: /continuous_integration/intelligent_test_runner/ +[12]: /tests/code_coverage/ +[13]: /intelligent_test_runner/ diff --git a/content/en/continuous_integration/tests/browser_tests.md b/content/en/tests/browser_tests.md similarity index 95% rename from content/en/continuous_integration/tests/browser_tests.md rename to content/en/tests/browser_tests.md index 3839ad51ad99f..8147e44fd2e60 100644 --- a/content/en/continuous_integration/tests/browser_tests.md +++ b/content/en/tests/browser_tests.md @@ -5,6 +5,7 @@ description: Learn how to use CI Visibility and RUM to connect your test results aliases: - /continuous_integration/guides/rum_integration - /continuous_integration/integrate_tests/browser_tests +- /continuous_integration/tests/browser_tests further_reading: - link: "/continuous_integration/tests" tag: "Documentation" @@ -52,6 +53,6 @@ The RUM session has all the data that [RUM normally collects][3] so you can debu {{< partial name="whats-next/whats-next.html" >}} -[1]: /continuous_integration/tests/javascript/?tab=cypress#instrument-your-tests +[1]: /tests/setup/javascript/?tab=cypress#instrument-your-tests [2]: /real_user_monitoring/browser/ [3]: /real_user_monitoring/browser/data_collected/ diff --git a/content/en/continuous_integration/tests/code_coverage.md b/content/en/tests/code_coverage.md similarity index 99% rename from content/en/continuous_integration/tests/code_coverage.md rename to content/en/tests/code_coverage.md index 112ba92241441..79215fd47b7d4 100644 --- a/content/en/continuous_integration/tests/code_coverage.md +++ b/content/en/tests/code_coverage.md @@ -5,8 +5,9 @@ description: Learn how to report and use code coverage in Datadog. 
aliases: - /continuous_integration/guides/code_coverage/ - /continuous_integration/integrate_tests/code_coverage/ +- /continuous_integration/tests/code_coverage/ further_reading: -- link: "/continuous_integration/tests" +- link: "/tests" tag: "Documentation" text: "Learn about Test Visibility" - link: "/monitors/types/ci" @@ -234,7 +235,7 @@ Get alerted whenever code coverage for your service drops below a certain thresh {{< partial name="whats-next/whats-next.html" >}} -[1]: /continuous_integration/tests/ +[1]: /tests/ [2]: /dashboards [3]: /notebooks [4]: /monitors diff --git a/content/en/continuous_integration/tests/containers.md b/content/en/tests/containers.md similarity index 95% rename from content/en/continuous_integration/tests/containers.md rename to content/en/tests/containers.md index b4ad6c074d746..bcdeee06867e5 100644 --- a/content/en/continuous_integration/tests/containers.md +++ b/content/en/tests/containers.md @@ -3,10 +3,15 @@ title: Tests in Containers kind: documentation aliases: - /continuous_integration/setup_tests/containers + - /continuous_integration/tests/containers +further_reading: +- link: "/tests" + tag: "Documentation" + text: "Learn about Test Visibility" --- {{< site-region region="gov" >}} -
CI Visibility is not available in the selected site ({{< region-param key="dd_site_name" >}}) at this time.
+
Test Visibility is not available in the selected site ({{< region-param key="dd_site_name" >}}) at this time.
{{< /site-region >}} ## Overview @@ -295,6 +300,10 @@ Additionally, you need to pass in the environment variables required to configur {{% /tab %}} {{< /tabs >}} +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + [1]: https://docs.docker.com/engine/reference/run/ [2]: https://docs.docker.com/compose/reference/ -[3]: /continuous_integration/tests/ +[3]: /tests/ diff --git a/content/en/continuous_integration/tests/developer_workflows.md b/content/en/tests/developer_workflows.md similarity index 94% rename from content/en/continuous_integration/tests/developer_workflows.md rename to content/en/tests/developer_workflows.md index 43e58bdd31f38..60502c8150f2c 100644 --- a/content/en/continuous_integration/tests/developer_workflows.md +++ b/content/en/tests/developer_workflows.md @@ -6,6 +6,7 @@ aliases: - /continuous_integration/guides/developer_workflows - /continuous_integration/guides/pull_request_comments - /continuous_integration/integrate_tests/developer_workflows +- /continuous_integration/tests/developer_workflows further_reading: - link: "https://www.datadoghq.com/blog/datadog-github-actions-ci-visibility/" tag: "Blog" @@ -18,10 +19,10 @@ further_reading: ## Overview {{< site-region region="gov" >}} -
CI Visibility is not available in the selected site ({{< region-param key="dd_site_name" >}}) at this time.
+
Test Visibility is not available in the selected site ({{< region-param key="dd_site_name" >}}) at this time.
{{< /site-region >}} -[CI Test Visibility][5] integrates with other developer-oriented Datadog products as well as external partners such as GitHub to streamline developer workflows with features including being able to: +[Test Visibility][5] integrates with other developer-oriented Datadog products as well as external partners such as GitHub to streamline developer workflows with features including being able to: - [Create and open GitHub issues](#create-and-open-github-issues) - [Open tests in GitHub and your IDE](#open-tests-in-github-and-your-ide) diff --git a/content/en/tests/guides/_index.md b/content/en/tests/guides/_index.md new file mode 100644 index 0000000000000..16726d79bddc5 --- /dev/null +++ b/content/en/tests/guides/_index.md @@ -0,0 +1,16 @@ +--- +title: Test Visibility Guides +kind: guide +private: true +disable_toc: true +cascade: + algolia: + rank: 20 + category: Guide + subcategory: Test Visibility Guides +--- + +{{< whatsnext desc="Test Visibility Guides:" >}} + {{< nextlink href="/tests/guides/flaky_test_management" >}}Flaky Test Management{{< /nextlink >}} + {{< nextlink href="/tests/guides/add_custom_metrics" >}}Add Custom Metrics to Your Tests{{< /nextlink >}} +{{< /whatsnext >}} diff --git a/content/en/continuous_integration/guides/add_custom_metrics.md b/content/en/tests/guides/add_custom_metrics.md similarity index 98% rename from content/en/continuous_integration/guides/add_custom_metrics.md rename to content/en/tests/guides/add_custom_metrics.md index 49af329c3dd4f..7766bd1e28347 100644 --- a/content/en/continuous_integration/guides/add_custom_metrics.md +++ b/content/en/tests/guides/add_custom_metrics.md @@ -2,6 +2,8 @@ title: Add Custom Metrics To Your Tests kind: guide description: Learn how to use custom metrics (measures) in your tests. 
+aliases: +- /continuous_integration/guides/add_custom_metrics/ further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git a/content/en/continuous_integration/guides/flaky_test_management.md b/content/en/tests/guides/flaky_test_management.md similarity index 98% rename from content/en/continuous_integration/guides/flaky_test_management.md rename to content/en/tests/guides/flaky_test_management.md index d4e7bb9514718..9af73ecc4c4a2 100644 --- a/content/en/continuous_integration/guides/flaky_test_management.md +++ b/content/en/tests/guides/flaky_test_management.md @@ -3,6 +3,7 @@ title: Flaky Test Management kind: guide aliases: - /continuous_integration/guides/find_flaky_tests/ +- /continuous_integration/guides/flaky_test_management/ further_reading: - link: "/continuous_integration/tests/" tag: "Documentation" diff --git a/content/en/tests/search/_index.md b/content/en/tests/search/_index.md new file mode 100644 index 0000000000000..0714e8fba887e --- /dev/null +++ b/content/en/tests/search/_index.md @@ -0,0 +1,93 @@ +--- +title: Search and Manage CI Tests +description: Learn how to search for your CI tests. +algolia: + rank: 70 + tags: ['flaky test', 'flaky tests', 'test regression', 'test regressions', 'test service', 'test services'] +further_reading: +- link: "/continuous_integration/explorer" + tag: "Documentation" + text: "Search and filter test runs" +- link: "/continuous_integration/guides/flaky_test_management" + tag: "Documentation" + text: "Learn how to manage flaky tests" +--- + +{{< site-region region="gov" >}} +
CI Visibility is not available in the selected site ({{< region-param key="dd_site_name" >}}) at this time.
+{{< /site-region >}} + +## Overview + +The [Tests page][1] is useful for developers who want to keep an eye on their test results. + +{{< img src="/continuous_integration/tests.png" text="CI Tests page" style="width:100%" >}} + +You can access low-level and immediate insights: + +- See what tests are failing and why. +- See your last commit's test results. +- View the wall time of your tests in your feature branch and compare it to the default branch, to identify if you're about to introduce a performance regression. +- Find out if your commit introduces a new [flaky test][5] that wasn't flaky before, indicating that your code change is what's making it flaky. This gives you the opportunity to fix the problem before proceeding rather than contributing to the number of flaky tests in your CI. + +You can also access high-level accumulation and trends: + +- See the effects that changed code, added tests, and increased complexity have on your test suite performance over time. +- See which tests have become slower over time and identify the commit that introduced the regression. +- Take advantage of Datadog's automatic test flakiness detection and tracking, which shows you which tests are becoming more or less unreliable over time. + +## Search for tests + +To see your tests, navigate to [**CI** > **Tests**][1] and select between the [**Branches**](#branches-view) or [**Default Branches** view](#default-branches-view). + +### Branches view + +The [Branches][2] view of the Tests page lists all branches from all [test services][3] that have reported test results. This tab is useful for individual developers to quickly see the status of tests that run on their code branches and troubleshoot test failures. + +In this page, you can filter the list by name, test service, or commit SHA, or to show only your branches (branches that contain at least one commit authored by you), enable the **My branches** toggle and add the email addresses you use in your Git configuration. 
+ +#### Test results + +For each branch, you can see the test service, the number of failed, passed, and skipped tests, test regressions, wall time, the percentage of change compared to the default branch, when the commit was last updated, and the avatar of the author of the commit. + +Click on a branch to explore the test details page, which includes information about the branch's latest commits, flaky tests, test performance, common error types, and all test runs. + +{{< img src="continuous_integration/test_details.png" alt="Test Details page for a single branch" style="width:100%;">}} + +#### Test suite performance + +There is also information about the [wall time][4] of the most recent test suite run, and a comparison to the average wall time of the default branch. The comparison of your branch's wall time to the default branch's wall time can help you determine if your commit is introducing performance [regressions][6] to your test suite. + +Hovering over the commit author avatar shows detailed information about the latest commit. + +#### Test regressions + +[Test regressions][6] are evaluated per commit in an effort to tie performance regressions to specific code changes. + +#### Investigate for more details + +Click on the row to see test suite run details such as test results for the last commit on this branch (or you can switch branches), failing tests and the most common errors, slow tests, flaky tests, and a complete list of test runs over the time frame selected. You can filter this list of test runs by facet to get to the information you want to see most. + +Click into one of the test runs to see the test trace as a flame graph or a span list. The _Runs (n)_ list on the left lets you quickly access traces for each retry of the test for the same commit. + +#### Explore connections to services, resources, logs, and network events + +Click the CI provider link to examine the Resource, Service, or Analytics page for the test. 
You can also find complete tags information and links to related log events and network monitoring events. + +### Default Branches view + +The [Default Branches][7] view of the Tests page shows aggregated health metrics for the _default_ branch of each test service. This view is useful for teams to understand the overall health of the service over time. + +The Default Branches view shows similar information to the Branches view, but applied to the default branch. It compares the current wall time with the average default branch wall time to give you an indication of how your test suite performance is trending over time. + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: https://app.datadoghq.com/ci/test-services +[2]: https://app.datadoghq.com/ci/test-services?view=branches +[3]: /glossary/#test-service +[4]: /glossary/#wall-time +[5]: /glossary/#flaky-test +[6]: /glossary/#test-regression +[7]: https://app.datadoghq.com/ci/test-services?view=default-branches \ No newline at end of file diff --git a/content/en/continuous_integration/tests/setup/_index.md b/content/en/tests/setup/_index.md similarity index 85% rename from content/en/continuous_integration/tests/setup/_index.md rename to content/en/tests/setup/_index.md index 68e70780a043c..8277d84b53b01 100644 --- a/content/en/continuous_integration/tests/setup/_index.md +++ b/content/en/tests/setup/_index.md @@ -2,6 +2,8 @@ title: Configure Test Visibility kind: documentation type: multi-code-lang +aliases: +- continuous_integration/tests/setup/ --- For information about configuration options for [Test Visibility][1], choose your language: diff --git a/content/en/continuous_integration/tests/setup/dotnet.md b/content/en/tests/setup/dotnet.md similarity index 99% rename from content/en/continuous_integration/tests/setup/dotnet.md rename to content/en/tests/setup/dotnet.md index a436c17eca070..6e0e0be6bcaa2 100644 --- a/content/en/continuous_integration/tests/setup/dotnet.md +++ 
b/content/en/tests/setup/dotnet.md @@ -7,6 +7,7 @@ code_lang_weight: 0 aliases: - /continuous_integration/setup_tests/dotnet - /continuous_integration/tests/dotnet + - continuous_integration/tests/setup/dotnet further_reading: - link: "/continuous_integration/tests/containers/" tag: "Documentation" diff --git a/content/en/continuous_integration/tests/setup/java.md b/content/en/tests/setup/java.md similarity index 99% rename from content/en/continuous_integration/tests/setup/java.md rename to content/en/tests/setup/java.md index 5e681773bc9ea..a19206c03cb53 100644 --- a/content/en/continuous_integration/tests/setup/java.md +++ b/content/en/tests/setup/java.md @@ -7,6 +7,7 @@ code_lang_weight: 10 aliases: - /continuous_integration/setup_tests/java - /continuous_integration/tests/java + - continuous_integration/tests/setup/java further_reading: - link: "/continuous_integration/tests/containers/" tag: "Documentation" diff --git a/content/en/continuous_integration/tests/setup/javascript.md b/content/en/tests/setup/javascript.md similarity index 99% rename from content/en/continuous_integration/tests/setup/javascript.md rename to content/en/tests/setup/javascript.md index 9a1908e583bf7..4d9dd0b31ed08 100644 --- a/content/en/continuous_integration/tests/setup/javascript.md +++ b/content/en/tests/setup/javascript.md @@ -7,6 +7,7 @@ code_lang_weight: 20 aliases: - /continuous_integration/setup_tests/javascript - /continuous_integration/tests/javascript + - continuous_integration/tests/setup/javascript further_reading: - link: "/continuous_integration/tests/containers/" tag: "Documentation" diff --git a/content/en/continuous_integration/tests/setup/junit_xml.md b/content/en/tests/setup/junit_xml.md similarity index 99% rename from content/en/continuous_integration/tests/setup/junit_xml.md rename to content/en/tests/setup/junit_xml.md index 3260ae8082bfc..ca4521272e8c4 100644 --- a/content/en/continuous_integration/tests/setup/junit_xml.md +++ 
b/content/en/tests/setup/junit_xml.md @@ -7,6 +7,7 @@ code_lang_weight: 60 aliases: - /continuous_integration/setup_tests/junit_upload - /continuous_integration/tests/junit_upload + - continuous_integration/tests/setup/junit_xml further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git a/content/en/continuous_integration/tests/setup/python.md b/content/en/tests/setup/python.md similarity index 99% rename from content/en/continuous_integration/tests/setup/python.md rename to content/en/tests/setup/python.md index 52e95596e4789..b74d4fd2f6038 100644 --- a/content/en/continuous_integration/tests/setup/python.md +++ b/content/en/tests/setup/python.md @@ -7,6 +7,7 @@ code_lang_weight: 30 aliases: - /continuous_integration/setup_tests/python - /continuous_integration/tests/python + - continuous_integration/tests/setup/python further_reading: - link: "/continuous_integration/tests/containers/" tag: "Documentation" diff --git a/content/en/continuous_integration/tests/setup/ruby.md b/content/en/tests/setup/ruby.md similarity index 99% rename from content/en/continuous_integration/tests/setup/ruby.md rename to content/en/tests/setup/ruby.md index 67efd96ef7da2..8fe5f6dc18f62 100644 --- a/content/en/continuous_integration/tests/setup/ruby.md +++ b/content/en/tests/setup/ruby.md @@ -7,6 +7,7 @@ code_lang_weight: 40 aliases: - /continuous_integration/setup_tests/ruby - /continuous_integration/tests/ruby + - continuous_integration/tests/setup/ruby further_reading: - link: "/continuous_integration/tests/containers/" tag: "Documentation" diff --git a/content/en/continuous_integration/tests/setup/swift.md b/content/en/tests/setup/swift.md similarity index 99% rename from content/en/continuous_integration/tests/setup/swift.md rename to content/en/tests/setup/swift.md index 26cb10e92ff54..32cdca6920c29 100644 --- a/content/en/continuous_integration/tests/setup/swift.md +++ b/content/en/tests/setup/swift.md @@ -7,6 +7,7 @@ code_lang_weight: 50 aliases: - 
/continuous_integration/setup_tests/swift - /continuous_integration/tests/swift + - continuous_integration/tests/setup/swift further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git a/content/en/continuous_integration/tests/swift_tests.md b/content/en/tests/swift_tests.md similarity index 97% rename from content/en/continuous_integration/tests/swift_tests.md rename to content/en/tests/swift_tests.md index 557719c591aca..8a8cfe5b386d5 100644 --- a/content/en/continuous_integration/tests/swift_tests.md +++ b/content/en/tests/swift_tests.md @@ -5,6 +5,7 @@ description: Learn how to use CI Visibility and RUM to connect your Swift test r aliases: - /continuous_integration/guides/rum_swift_integration - /continuous_integration/integrate_tests/swift_tests +- continuous_integration/tests/swift_tests further_reading: - link: "/continuous_integration/tests" tag: "Documentation" diff --git a/content/en/tests/troubleshooting/_index.md b/content/en/tests/troubleshooting/_index.md new file mode 100644 index 0000000000000..1141ded83ab5e --- /dev/null +++ b/content/en/tests/troubleshooting/_index.md @@ -0,0 +1,175 @@ +--- +title: Test Visibility Troubleshooting +kind: documentation +further_reading: + - link: "/continuous_integration/tests" + tag: "Documentation" + text: "Learn how to monitor your CI tests" +--- + +{{< site-region region="gov" >}} +
CI Visibility is not available in the selected site ({{< region-param key="dd_site_name" >}}) at this time.
+{{< /site-region >}} + +## Overview + +This page provides information to help you troubleshot issues with Test Visibility. If you need additional help, contact [Datadog Support][2]. + +## Your tests are instrumented, but Datadog isn't showing any data + +1. Go to the [**Tests**][3] page for the language you're instrumenting and check that the testing framework you are using is supported in the **Compatibility** section. +2. Check if you see any test results in the [**Test Runs**][4] section. If you do see results there, but not in the [**Tests**][5] section, Git information is missing. See [Data appears in Test Runs but not Tests](#data-appears-in-test-runs-but-not-tests) to troubleshoot it. +3. If you are reporting the data through the Datadog Agent, make sure it is running on the host where tests are run (accessible at `localhost:8126`), or if accessible on another hostname or port, make sure you run your tests with the appropriate Agent hostname set in the `DD_AGENT_HOST` and the appropriate port in `DD_TRACE_AGENT_PORT` environment variables. You can activate [debug mode][6] in the tracer to check if it's able to connect to the Agent. +4. If you still don't see any results, [contact Support][2] for troubleshooting help. + +## You are uploading JUnit test reports with `datadog-ci` but some or all tests are missing +If you are uploading JUnit test report files with `datadog-ci` CLI and you do not see the tests, it is likely the tests are being discarded because the report is considered incorrect. + +The following aspects make a JUnit test report incorrect: +* A timestamp of the reported tests that is older than **71 hours** before the moment the report is uploaded. +* A testsuite without a name. + +## Data appears in test runs but not tests + +If you can see test results data in the **Test Runs** tab, but not the **Tests** tab, Git metadata (repository, commit, or branch) is probably missing. 
To confirm this is the case, open a test execution in the [**Test Runs**][4] section, and check that there is no `git.repository_url`, `git.commit.sha`, or `git.branch`. If these tags are not populated, nothing shows in the [**Tests**][5] section. + +1. Tracers first use the environment variables, if any, set by the CI provider to collect Git information. See [Running tests inside a container][7] for a list of environment variables that the tracer attempts to read for each supported CI provider. At a minimum, this populates the repository, commit hash, and branch information. +2. Next, tracers fetch Git metadata using the local `.git` folder, if present, by executing `git` commands. This populates all Git metadata fields, including commit message, author, and committer information. Ensure the `.git` folder is present and the `git` binary is installed and in `$PATH`. This information is used to populate attributes not detected in the previous step. +3. You can also provide Git information manually using environment variables, which override information detected by any of the previous steps. + + The supported environment variables for providing Git information are: + + `DD_GIT_REPOSITORY_URL` **(required)** + : URL of the repository where the code is stored. Both HTTP and SSH URLs are supported.
+ **Example**: `git@github.com:MyCompany/MyApp.git`, `https://github.com/MyCompany/MyApp.git` + + `DD_GIT_COMMIT_SHA` **(required)** + : Full (40-character long SHA1) commit hash.
+ **Example**: `a18ebf361cc831f5535e58ec4fae04ffd98d8152` + + `DD_GIT_BRANCH` + : Git branch being tested. Leave empty if providing tag information instead.
+ **Example**: `develop` + + `DD_GIT_TAG` + : Git tag being tested (if applicable). Leave empty if providing branch information instead.
+ **Example**: `1.0.1` + + `DD_GIT_COMMIT_MESSAGE` + : Commit message.
+ **Example**: `Set release number` + + `DD_GIT_COMMIT_AUTHOR_NAME` + : Commit author name.
+ **Example**: `John Smith` + + `DD_GIT_COMMIT_AUTHOR_EMAIL` + : Commit author email.
+ **Example**: `john@example.com` + + `DD_GIT_COMMIT_AUTHOR_DATE` + : Commit author date in ISO 8601 format.
+ **Example**: `2021-03-12T16:00:28Z` + + `DD_GIT_COMMIT_COMMITTER_NAME` + : Commit committer name.
+ **Example**: `Jane Smith` + + `DD_GIT_COMMIT_COMMITTER_EMAIL` + : Commit committer email.
+ **Example**: `jane@example.com` + + `DD_GIT_COMMIT_COMMITTER_DATE` + : Commit committer date in ISO 8601 format.
+ **Example**: `2021-03-12T16:00:28Z` + +4. If no CI provider environment variables are found, tests results are sent with no Git metadata. + +## The tests wall time is empty + +If you cannot see the tests wall time it is likely that the CI provider metadata is missing. To confirm this is the case, open a test execution in the [**Test Runs**][4] section, and check if the `ci.pipeline.id`, `ci.pipeline.name`, `ci.pipeline.number`, or `ci.job.url` tags are missing. If these tags are not populated, then nothing shows in the wall time column. + +1. Tracers use the environment variables set by the CI provider to collect this information. See [Running tests inside a container][7] for a list of environment variables that the tracer attempts to read for each supported CI provider. Make sure that the environment variables have the expected values set. +2. Check that you are running your tests in a supported CI provider. For a list of supported CI providers, see [Running tests inside a container][7]. Only these CI providers can extract the information to enrich the test metadata with CI information. +3. If you still don't see the wall time, contact [Datadog support][2] for help. + +## The tests wall time is not what is expected + +### How wall time is calculated +The wall time is defined as the time difference between the start time of the first test and the end time of the last test for the given pipeline. + +This is done using the following algorithm: + +1. Compute a hash based on CI information to group the tests. + 1. If the tests include `ci.job.url`, use this tag to calculate the hash. + 2. If the tests don't include `ci.job.url`, use `ci.pipeline.id` + `ci.pipeline.name` + `ci.pipeline.number` to calculate the hash. +2. The calculated wall time is associated to a given hash. **Note**: If there are multiple jobs that execute tests, the wall time is calculated for each job, and the maximum from all calculated wall times is shown. 
+ +### Possible issues with wall time calculation +If you're using a library for testing time-dependent code, like [timecop][8] for Ruby or [FreezeGun][9] for Python, it is possible that test timestamps are wrong, and therefore calculated wall times. If this is the case, make sure that modifications to time are rolled back before finishing your tests. + +## The test status numbers are not what is expected + +The test status numbers are calculated based on the unique tests that were collected. The uniqueness of a test is defined not only by its suite and name, but by its test parameters and test configurations as well. + +### The numbers are lower than expected + +If the numbers are lower than expected, it is likely that either the library or the tool you are using to collect test data cannot collect test parameters and/or some test configurations. + +1. If you are uploading JUnit test report files: + 1. If you are running the same tests in different environment configurations, [make sure you are setting those configuration tags during the upload][10]. + 2. If you are running parameterized tests, it's very likely that the JUnit report does not have that information. [Try using a native library to report test data][3]. +2. If you still don't see the expected results, [contact Datadog support][2] for troubleshooting help. 
+ +### The passed/failed/skipped numbers are different than expected + +If the same test is collected several times for the same commit but with different status, the aggregated result follows the algorithm in the table below: + +| **Test Status - First Try** | **Test Status - Retry #1** | **Result** | +|-----------------------------|----------------------------|------------| +| `Passed` | `Passed` | `Passed` | +| `Passed` | `Failed` | `Passed` | +| `Passed` | `Skipped` | `Passed` | +| `Failed` | `Passed` | `Passed` | +| `Failed` | `Failed` | `Failed` | +| `Failed` | `Skipped` | `Failed` | +| `Skipped` | `Passed` | `Passed` | +| `Skipped` | `Failed` | `Failed` | +| `Skipped` | `Skipped` | `Skipped` | + +## The default branch is not correct + +### How it impacts the product + +The default branch is used to power some features of the products, namely: + +- Default branches list on the Tests page: This list only displays default branches. Setting the wrong default branch can result in missing or incorrect data in the default branches list. + +- Wall time comparison for non-default branches: On the Tests page, in the Branches view, the **VS Default** column is calculated by comparing wall time for the current branch against wall time for the default branch. + +- New flaky tests: Tests that are not currently classified as flaky in the default branch. If the default branch is not properly set, this could lead to a wrong number of detected new flaky tests. + +- Pipelines list: The pipelines list only displays default branches. Setting the wrong default branch can result in missing or incorrect data in the pipelines list. + +### How to fix the default branch + +If you have admin access, you can update it from the [Repository Settings Page][11]. 
+ + + +## Further reading + +{{< partial name="whats-next/whats-next.html" >}} + +[1]: https://www.jenkins.io/doc/book/system-administration/viewing-logs/ +[2]: /help/ +[3]: /continuous_integration/tests/ +[4]: https://app.datadoghq.com/ci/test-runs +[5]: https://app.datadoghq.com/ci/test-services +[6]: /tracing/troubleshooting/tracer_debug_logs +[7]: /continuous_integration/tests/containers/ +[8]: https://github.com/travisjeffery/timecop +[9]: https://github.com/spulec/freezegun +[10]: /continuous_integration/tests/junit_upload/?tabs=linux#collecting-environment-configuration-metadata +[11]: https://app.datadoghq.com/ci/settings/repository +[12]: /continuous_integration/intelligent_test_runner/ \ No newline at end of file