# fix: Add USB diagnostics utility to troubleshoot unreadable serial numbers (#643)
---
name: Comprehensive Test Suite

# Runs on pushes to main/develop/claude branches, on PRs targeting
# main/develop, and on manual dispatch.
on:
  push:
    branches: [ main, develop, 'claude/**' ]
  pull_request:
    branches: [ main, develop ]
  workflow_dispatch:

env:
  PYTHON_VERSION_DEFAULT: '3.11'
  # Coverage baseline: 26% (established 2025-01-14)
  # Target: Increase by 1% per quarter to reach 30% by Q2 2025
  MIN_COVERAGE_PERCENT: 26
  TARGET_COVERAGE_PERCENT: 80
jobs:
  # ====================================================================================
  # UNIT TESTS - Fast, isolated component tests
  # ====================================================================================
  unit-tests:
    name: Unit Tests (Python ${{ matrix.python-version }})
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11']
      # Run both Python versions to completion even if one fails.
      fail-fast: false
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'

      # Qt (PyQt6) needs these X/EGL libraries even in headless mode.
      - name: Install system dependencies for Qt
        run: |
          sudo apt-get update
          sudo apt-get install -y \
            libegl1 \
            libxkbcommon-x11-0 \
            libxcb-icccm4 \
            libxcb-image0 \
            libxcb-keysyms1 \
            libxcb-randr0 \
            libxcb-render-util0 \
            libxcb-xinerama0 \
            libxcb-xfixes0 \
            x11-utils \
            xvfb

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r shared/requirements.txt
          pip install -r server/requirements.txt
          pip install -r requirements-test.txt
          # Install additional runtime dependencies needed for tests
          pip install scipy zeroconf apscheduler tzlocal psutil qasync email-validator bcrypt pyjwt
          # Install PyQt6 and PyQt6-Charts for GUI tests (requires system libraries in CI)
          pip install PyQt6 PyQt6-Charts || echo "PyQt6 installation failed (expected in headless CI)"

      - name: Run unit tests with coverage
        env:
          LABLINK_ENABLE_MOCK_EQUIPMENT: 'true'
          PYTHONPATH: ${{ github.workspace }}
          QT_QPA_PLATFORM: offscreen
        run: |
          # Use xvfb-run for GUI tests (headless X server)
          xvfb-run -a pytest tests/unit/ \
            -v \
            --cov=server \
            --cov=client \
            --cov=shared \
            --cov-report=xml \
            --cov-report=term-missing \
            --cov-report=html \
            --junitxml=junit/test-results-${{ matrix.python-version }}.xml \
            --tb=short

      # Fails the job if coverage drops below the established baseline.
      - name: Check coverage threshold
        run: |
          coverage report --fail-under=${{ env.MIN_COVERAGE_PERCENT }}

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
        with:
          files: ./coverage.xml
          flags: unit-tests,python-${{ matrix.python-version }}
          name: unit-${{ matrix.python-version }}
          fail_ci_if_error: false

      - name: Upload test results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: test-results-unit-${{ matrix.python-version }}
          path: junit/test-results-*.xml

      - name: Upload coverage HTML
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: coverage-html-unit-${{ matrix.python-version }}
          path: htmlcov/
# ====================================================================================
# API ENDPOINT TESTS - Test all REST API endpoints
# ====================================================================================
api-tests:
name: API Endpoint Tests
runs-on: ubuntu-latest
needs: unit-tests
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r shared/requirements.txt
pip install -r server/requirements.txt
pip install -r requirements-test.txt
# Install additional runtime dependencies needed for tests
pip install scipy zeroconf apscheduler tzlocal psutil qasync email-validator bcrypt pyjwt
# Install PyQt6 and PyQt6-Charts for GUI tests (requires system libraries in CI)
pip install PyQt6 PyQt6-Charts || echo "PyQt6 installation failed (expected in headless CI)"
- name: Run API tests with coverage
env:
LABLINK_ENABLE_MOCK_EQUIPMENT: 'true'
PYTHONPATH: ${{ github.workspace }}
run: |
pytest tests/api/ \
-v \
-m api \
--cov=server/api \
--cov-report=xml \
--cov-report=term-missing \
--cov-report=html \
--junitxml=junit/test-results-api.xml \
--tb=short \
|| echo "Some API tests may fail if endpoints are not yet implemented"
- name: Upload API coverage
uses: codecov/codecov-action@v4
with:
files: ./coverage.xml
flags: api-tests
name: api-endpoints
fail_ci_if_error: false
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: test-results-api
path: junit/test-results-api.xml
# ====================================================================================
# INTEGRATION TESTS - Test component interactions
# ====================================================================================
integration-tests:
name: Integration Tests
runs-on: ubuntu-latest
needs: unit-tests
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r shared/requirements.txt
pip install -r server/requirements.txt
pip install -r requirements-test.txt
# Install additional runtime dependencies needed for tests
pip install scipy zeroconf apscheduler tzlocal psutil qasync email-validator bcrypt pyjwt
# Install PyQt6 and PyQt6-Charts for GUI tests (requires system libraries in CI)
pip install PyQt6 PyQt6-Charts || echo "PyQt6 installation failed (expected in headless CI)"
- name: Start server with mock equipment
env:
LABLINK_ENABLE_MOCK_EQUIPMENT: 'true'
PYTHONPATH: ${{ github.workspace }}
run: |
python -m server.main &
SERVER_PID=$!
echo "SERVER_PID=$SERVER_PID" >> $GITHUB_ENV
# Wait for server to start (max 30 seconds)
echo "Waiting for server to start..."
for i in {1..30}; do
if curl -s http://localhost:8000/health > /dev/null; then
echo "✅ Server is up!"
break
fi
echo "Waiting... ($i/30)"
sleep 1
done
- name: Run integration tests
env:
PYTHONPATH: ${{ github.workspace }}
run: |
pytest tests/integration/ \
-v \
-m integration \
--timeout=60 \
--junitxml=junit/test-results-integration.xml \
--tb=short
- name: Stop server
if: always()
run: |
if [ ! -z "$SERVER_PID" ]; then
kill $SERVER_PID || true
fi
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: test-results-integration
path: junit/test-results-integration.xml
# ====================================================================================
# CODE QUALITY - Linting, formatting, type checking
# ====================================================================================
code-quality:
name: Code Quality Checks
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
cache: 'pip'
- name: Install linting tools
run: |
python -m pip install --upgrade pip
pip install flake8 black isort mypy pylint
- name: Lint with flake8 (strict)
continue-on-error: true
run: |
# Check for Python syntax errors and undefined names
flake8 server/ client/ shared/ \
--count \
--select=E9,F63,F7,F82 \
--show-source \
--statistics || echo "⚠️ Linting issues found (non-blocking)"
- name: Lint with flake8 (warnings)
continue-on-error: true
run: |
flake8 server/ client/ shared/ \
--count \
--exit-zero \
--max-complexity=15 \
--max-line-length=127 \
--statistics
- name: Check code formatting with black
continue-on-error: true
run: |
black --check --diff server/ client/ shared/ || {
echo "⚠️ Code formatting issues found. Run 'black server/ client/ shared/' to fix (non-blocking)"
}
- name: Check import sorting with isort
continue-on-error: true
run: |
isort --check-only --diff server/ client/ shared/ || {
echo "⚠️ Import sorting issues found. Run 'isort server/ client/ shared/' to fix (non-blocking)"
}
- name: Type check with mypy
continue-on-error: true
run: |
mypy server/ --ignore-missing-imports || echo "Type checking found issues (non-blocking)"
# ====================================================================================
# SECURITY SCANNING - Dependency vulnerabilities and code security
# ====================================================================================
security-scan:
name: Security Scanning
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r shared/requirements.txt
pip install -r server/requirements.txt
pip install safety bandit pip-audit
- name: Security audit with pip-audit
run: |
# Allow specific known vulnerabilities
# - pip 24.0 (GHSA-4xh5-x5gv-qwph): Dev/CI only, trusted sources
# - ecdsa 0.19.1 (GHSA-wj6h-64fc-37mp): Orphaned dependency, not used
pip-audit --desc --ignore-vuln GHSA-4xh5-x5gv-qwph --ignore-vuln GHSA-wj6h-64fc-37mp || {
echo "❌ CRITICAL: Security vulnerabilities found in dependencies"
echo "Review docs/security/phase3_security_audit.md for details"
exit 1
}
- name: Security audit with safety
continue-on-error: true # Advisory only - pip-audit is primary gate
run: |
safety check --json || echo "⚠️ Safety check found vulnerabilities (advisory only)"
- name: Security scan with bandit
continue-on-error: true # Advisory only - low severity findings don't block
run: |
bandit -r server/ client/ -f json -o bandit-report.json || echo "⚠️ Bandit found security issues (advisory only)"
- name: Upload security scan results
uses: actions/upload-artifact@v4
if: always()
with:
name: security-scan-results
path: bandit-report.json
# ====================================================================================
# PERFORMANCE TESTS - Basic performance benchmarks
# ====================================================================================
performance-tests:
name: Performance Benchmarks
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION_DEFAULT }}
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r shared/requirements.txt
pip install -r server/requirements.txt
pip install -r requirements-test.txt
# Install additional runtime dependencies needed for tests
pip install scipy zeroconf apscheduler tzlocal psutil qasync email-validator bcrypt pyjwt
# Install PyQt6 and PyQt6-Charts for GUI tests (requires system libraries in CI)
pip install PyQt6 PyQt6-Charts || echo "PyQt6 installation failed (expected in headless CI)"
pip install pytest-benchmark
- name: Run performance benchmarks
env:
LABLINK_ENABLE_MOCK_EQUIPMENT: 'true'
PYTHONPATH: ${{ github.workspace }}
run: |
# Run performance tests if they exist
if [ -d "tests/performance" ]; then
pytest tests/performance/ \
-v \
--benchmark-only \
--benchmark-json=benchmark-results.json \
|| echo "⚠️ Performance tests not yet implemented"
else
echo "ℹ️ No performance tests directory found"
fi
- name: Upload benchmark results
uses: actions/upload-artifact@v4
if: always()
with:
name: benchmark-results
path: benchmark-results.json
# ====================================================================================
# TEST REPORT - Generate comprehensive test report
# ====================================================================================
test-report:
name: Generate Test Report
runs-on: ubuntu-latest
needs: [unit-tests, api-tests, integration-tests, code-quality, security-scan]
if: always()
permissions:
checks: write
pull-requests: write
contents: read
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download all test results
uses: actions/download-artifact@v4
with:
path: test-artifacts
- name: Publish test report
uses: dorny/test-reporter@v1
if: always()
with:
name: Test Results
path: 'test-artifacts/**/test-results-*.xml'
reporter: java-junit
fail-on-error: false
- name: Generate test summary
run: |
echo "# 🧪 Test Suite Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "## Test Results" >> $GITHUB_STEP_SUMMARY
echo "| Test Suite | Result |" >> $GITHUB_STEP_SUMMARY
echo "|------------|--------|" >> $GITHUB_STEP_SUMMARY
echo "| Unit Tests | ${{ needs.unit-tests.result == 'success' && '✅ Passed' || '❌ Failed' }} |" >> $GITHUB_STEP_SUMMARY
echo "| API Tests | ${{ needs.api-tests.result == 'success' && '✅ Passed' || '❌ Failed' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Integration Tests | ${{ needs.integration-tests.result == 'success' && '✅ Passed' || '❌ Failed' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Code Quality | ${{ needs.code-quality.result == 'success' && '✅ Passed' || '❌ Failed' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Security Scan | ${{ needs.security-scan.result == 'success' && '✅ Passed' || '⚠️ Warnings' }} |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "## Coverage Goals" >> $GITHUB_STEP_SUMMARY
echo "- **Minimum Coverage**: ${{ env.MIN_COVERAGE_PERCENT }}%" >> $GITHUB_STEP_SUMMARY
echo "- **Target Coverage**: ${{ env.TARGET_COVERAGE_PERCENT }}%" >> $GITHUB_STEP_SUMMARY
# ====================================================================================
# FINAL STATUS CHECK - Ensure all critical tests passed
# ====================================================================================
all-tests-passed:
name: All Tests Status
runs-on: ubuntu-latest
needs: [unit-tests, api-tests, integration-tests, code-quality, security-scan]
if: always()
steps:
- name: Check all test results
run: |
echo "========================================="
echo " FINAL TEST STATUS"
echo "========================================="
echo ""
echo "Unit Tests: ${{ needs.unit-tests.result }}"
echo "API Tests: ${{ needs.api-tests.result }}"
echo "Integration Tests: ${{ needs.integration-tests.result }}"
echo "Code Quality: ${{ needs.code-quality.result }}"
echo "Security Scan: ${{ needs.security-scan.result }}"
echo ""
# Unit tests and security scans must pass
if [[ "${{ needs.unit-tests.result }}" != "success" ]]; then
echo "❌ Unit tests failed - BLOCKING"
exit 1
fi
if [[ "${{ needs.security-scan.result }}" != "success" ]]; then
echo "❌ Security scan failed - BLOCKING"
echo "Review docs/security/phase3_security_audit.md for details"
exit 1
fi
if [[ "${{ needs.code-quality.result }}" != "success" ]]; then
echo "⚠️ Code quality checks had issues - NON-BLOCKING (advisory only)"
fi
# Integration and API tests are non-blocking (may have incomplete implementations)
if [[ "${{ needs.integration-tests.result }}" != "success" ]]; then
echo "⚠️ Integration tests failed - NON-BLOCKING"
fi
if [[ "${{ needs.api-tests.result }}" != "success" ]]; then
echo "⚠️ API tests failed - NON-BLOCKING (some endpoints may not be implemented)"
fi
echo ""
echo "========================================="
echo "✅ All critical tests passed!"
echo "========================================="