# nightly — GitHub Actions workflow (captured from run "nightly #310")
name: nightly

on:
  workflow_dispatch: # Allows manual triggering of the workflow
  schedule:
    - cron: '0 4 * * *' # Runs automatically at 4:00 AM UTC every day

# Least-privilege token: OIDC federation (id-token) plus issue creation;
# repository contents and PRs are read-only.
permissions:
  id-token: write
  issues: write
  contents: read
  pull-requests: read

# Serialize nightly runs so only one acceptance suite touches the shared
# test infrastructure at a time.
concurrency:
  group: single-acceptance-job-per-repo
  cancel-in-progress: false # don't cancel ongoing runs to ensure fixtures are completed and resources terminated
jobs:
  # Integration tests on a classic cluster; merges unit + integration
  # coverage and publishes it to Codecov via OIDC.
  integration:
    environment: tool
    runs-on: larger
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # full history, so tooling that inspects tags/commits works
      - name: Install Python
        uses: actions/setup-python@v5
        with:
          cache: 'pip'
          cache-dependency-path: '**/pyproject.toml'
          python-version: '3.12'
      - name: Install hatch
        run: pip install hatch==1.15.0
      - name: Run unit tests and generate test coverage report
        run: make test
      # Integration tests are run from within tests/integration folder.
      # Create .coveragerc with correct relative path to source code.
      - name: Prepare code coverage configuration for integration tests
        run: |
          cat > tests/integration/.coveragerc << EOF
          [run]
          source = ../../src
          relative_files = true
          EOF
      # Run tests from `tests/integration` as defined in .codegen.json
      # and generate code coverage for modules defined in .coveragerc
      - name: Run integration tests and generate test coverage report
        uses: databrickslabs/sandbox/acceptance@acceptance/v0.4.4
        with:
          vault_uri: ${{ secrets.VAULT_URI }}
          timeout: 2h
          create_issues: false # if enabled, it creates an issue for each test failure; disabled to reduce noise
          codegen_path: tests/integration/.codegen.json
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }}
          ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }}
          COVERAGE_FILE: ${{ github.workspace }}/.coverage # make sure the coverage report is preserved
      # collects all coverage reports
      - name: Merge coverage reports and convert them to XML
        run: |
          hatch run combine_coverage
      - name: Publish test coverage
        uses: codecov/codecov-action@v5
        with:
          use_oidc: true

  # Same integration suite, but executed against serverless compute
  # (DATABRICKS_SERVERLESS_COMPUTE_ID=auto switches the test harness).
  integration_serverless:
    environment: tool
    runs-on: larger
    env:
      DATABRICKS_SERVERLESS_COMPUTE_ID: auto
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install Python
        uses: actions/setup-python@v5
        with:
          cache: 'pip'
          cache-dependency-path: '**/pyproject.toml'
          python-version: '3.12'
      - name: Install hatch
        run: pip install hatch==1.15.0
      # Integration tests are run from within tests/integration folder.
      # Create .coveragerc with correct relative path to source code.
      - name: Prepare code coverage configuration for integration tests
        run: |
          cat > tests/integration/.coveragerc << EOF
          [run]
          source = ../../src
          relative_files = true
          EOF
      - name: Run integration tests on serverless cluster
        uses: databrickslabs/sandbox/acceptance@acceptance/v0.4.4
        with:
          vault_uri: ${{ secrets.VAULT_URI }}
          timeout: 2h
          create_issues: false
          codegen_path: tests/integration/.codegen.json
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }}
          ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }}
          # re-exported explicitly so the acceptance action sees it
          DATABRICKS_SERVERLESS_COMPUTE_ID: ${{ env.DATABRICKS_SERVERLESS_COMPUTE_ID }}
          COVERAGE_FILE: ${{ github.workspace }}/.coverage # make sure the coverage report is preserved
      # collects all coverage reports
      - name: Merge coverage reports and convert them to XML
        run: |
          hatch run combine_coverage
      - name: Publish test coverage
        uses: codecov/codecov-action@v5
        with:
          use_oidc: true

  # End-to-end tests on a classic cluster; needs dbt, the Databricks CLI
  # and azure-cli auth (workspace host resolved from Key Vault).
  e2e:
    environment: tool
    runs-on: larger
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install Python
        uses: actions/setup-python@v5
        with:
          cache: 'pip'
          cache-dependency-path: '**/pyproject.toml'
          python-version: '3.12'
      - name: Install hatch
        run: pip install hatch==1.15.0
      - name: Install dbt
        run: |
          pip install dbt-core==1.10.9 dbt-databricks==1.10.9
      - name: Install Databricks CLI
        run: |
          curl -fsSL https://raw.githubusercontent.com/databricks/setup-cli/main/install.sh | sh
          databricks --version
      - name: Azure login (OIDC)
        uses: azure/login@v2
        with:
          client-id: ${{ secrets.ARM_CLIENT_ID }}
          tenant-id: ${{ secrets.ARM_TENANT_ID }}
          allow-no-subscriptions: true
      - name: Set env vars for Azure CLI auth
        run: |
          val=$(az keyvault secret show --id "${{ secrets.VAULT_URI }}/secrets/DATABRICKS-HOST" --query value -o tsv)
          echo "DATABRICKS_HOST=$val" >> $GITHUB_ENV
          echo "DATABRICKS_AUTH_TYPE=azure-cli" >> $GITHUB_ENV
      - name: Run e2e tests
        uses: databrickslabs/sandbox/acceptance@acceptance/v0.4.4
        with:
          vault_uri: ${{ secrets.VAULT_URI }}
          timeout: 2h
          create_issues: false
          codegen_path: tests/e2e/.codegen.json
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }}
          ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }}

  # Same e2e suite, executed against serverless compute.
  e2e_serverless:
    environment: tool
    runs-on: larger
    env:
      DATABRICKS_SERVERLESS_COMPUTE_ID: auto
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install Python
        uses: actions/setup-python@v5
        with:
          cache: 'pip'
          cache-dependency-path: '**/pyproject.toml'
          python-version: '3.12'
      - name: Install hatch
        run: pip install hatch==1.15.0
      - name: Install dbt
        run: |
          pip install dbt-core==1.10.9 dbt-databricks==1.10.9
      - name: Install Databricks CLI
        run: |
          curl -fsSL https://raw.githubusercontent.com/databricks/setup-cli/main/install.sh | sh
          databricks --version
      - name: Azure login (OIDC)
        uses: azure/login@v2
        with:
          client-id: ${{ secrets.ARM_CLIENT_ID }}
          tenant-id: ${{ secrets.ARM_TENANT_ID }}
          allow-no-subscriptions: true
      - name: Set env vars for Azure CLI auth
        run: |
          val=$(az keyvault secret show --id "${{ secrets.VAULT_URI }}/secrets/DATABRICKS-HOST" --query value -o tsv)
          echo "DATABRICKS_HOST=$val" >> $GITHUB_ENV
          echo "DATABRICKS_AUTH_TYPE=azure-cli" >> $GITHUB_ENV
      - name: Run e2e tests on serverless cluster
        uses: databrickslabs/sandbox/acceptance@acceptance/v0.4.4
        with:
          vault_uri: ${{ secrets.VAULT_URI }}
          timeout: 2h
          create_issues: false
          codegen_path: tests/e2e/.codegen.json
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }}
          ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }}
          # re-exported explicitly so the acceptance action sees it
          DATABRICKS_SERVERLESS_COMPUTE_ID: ${{ env.DATABRICKS_SERVERLESS_COMPUTE_ID }}

  # Performance benchmarks on serverless, compared against a stored baseline.
  benchmark:
    environment: tool
    runs-on: larger
    env:
      DATABRICKS_SERVERLESS_COMPUTE_ID: auto
      BENCHMARKS_DIR: tests/perf/.benchmarks
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install Python
        uses: actions/setup-python@v5
        with:
          cache: 'pip'
          cache-dependency-path: '**/pyproject.toml'
          python-version: '3.12'
      - name: Install hatch
        run: pip install hatch==1.15.0
      - name: Login to Azure for azure-cli authentication
        uses: azure/login@v2
        with:
          client-id: ${{ secrets.ARM_CLIENT_ID }}
          tenant-id: ${{ secrets.ARM_TENANT_ID }}
          allow-no-subscriptions: true
      - name: Run performance tests and compare with baseline
        timeout-minutes: 120
        env:
          DATABRICKS_SERVERLESS_COMPUTE_ID: ${{ env.DATABRICKS_SERVERLESS_COMPUTE_ID }}
        run: |
          export DATABRICKS_HOST=$(az keyvault secret show --id "${{ secrets.VAULT_URI }}/secrets/DATABRICKS-HOST" --query value -o tsv)
          export DATABRICKS_AUTH_TYPE=azure-cli
          # We are not using acceptance action as it does not show the comparison results.
          # The run fails if performance degrades by more than 25%.
          # Tests are run sequentially to reduce variability.
          # Do at least 5 rounds to get more stable results.
          hatch run pytest tests/perf -v -n 1 \
            --benchmark-storage=$BENCHMARKS_DIR \
            --benchmark-compare=baseline \
            --benchmark-compare-fail=mean:25% \
            --benchmark-min-rounds=5