feat: new kv table for kv store #28321

Workflow file for this run

name: Playwright UI Tests
on:
  push:
    branches:
      - "main"
    # paths-ignore:
    #   - "**.md"
    #   - "**.yml"
    #   - "**.yaml"
  pull_request:
    branches:
      - "*"
    # paths-ignore:
    #   - "**.md"
    #   - "**.yml"
    #   - "**.yaml"
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
env:
  COLUMNS: 150
  ZO_ROOT_USER_EMAIL: root@example.com
  ZO_ROOT_USER_PASSWORD: Complexpass#123
  ZO_BASE_URL: http://localhost:5080
  WS_ZO_BASE_URL: ws://localhost:5080
  ZO_BASE_URL_SC: http://localhost:5080
  ZO_BASE_URL_SC_UI: http://localhost:5080
  INGESTION_URL: http://localhost:5080
  ORGNAME: default
  ZO_QUICK_MODE_NUM_FIELDS: 100
  ZO_QUICK_MODE_STRATEGY: first
  ZO_ALLOW_USER_DEFINED_SCHEMAS: true
  ZO_INGEST_ALLOWED_UPTO: 5
  ZO_FEATURE_QUERY_EXCLUDE_ALL: false
  ZO_USAGE_BATCH_SIZE: 200
  ZO_USAGE_PUBLISH_INTERVAL: 2
  ZO_USAGE_REPORTING_ENABLED: true
  ZO_MIN_AUTO_REFRESH_INTERVAL: 5
  ZO_STREAMING_ENABLED: true
  ZO_COLS_PER_RECORD_LIMIT: "80000"
  ZO_SMTP_ENABLED: true
  ZO_FORMAT_STREAM_NAME_TO_LOWERCASE: false
  ZO_CREATE_ORG_THROUGH_INGESTION: true
  ZO_UTF8_VIEW_ENABLED: false
  UPLOAD_TO_TESTDINO: ${{ vars.UPLOAD_TO_TESTDINO || 'false' }}
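# Job flow (summary of the jobs defined below): build_binary produces the openobserve
# release binary and built frontend; ui_integration_tests fans out into per-folder
# Playwright shards against that binary; merge_and_upload_reports merges the blob
# reports and pushes results to TestDino; generate_coverage_report aggregates nyc
# coverage; playwright_summary gates the overall run on the results of all of the above.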
jobs:
  build_binary:
    name: build_binary
    runs-on:
      labels: repo-openobserve-standard-16
    steps:
      - name: Remove unused tools
        run: |
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf "/usr/local/share/boost"
          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
      - name: Clone the current repo
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Setup Rust Toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2025-11-11
          targets: x86_64-unknown-linux-gnu
      - uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true
          prefix-key: playwright
      - name: Install Protoc
        uses: arduino/setup-protoc@v3
        with:
          version: "21.12"
      - uses: actions/setup-node@v5
        with:
          node-version: 22
      - name: Build frontend code
        env:
          NODE_OPTIONS: "--max-old-space-size=8192"
          VITE_COVERAGE: "true"
          COVERAGE: "true"
        run: cd web && npm install && npm run build
      - name: Build and run binary
        run: cargo build --release --target x86_64-unknown-linux-gnu
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: release-binary
          path: target/x86_64-unknown-linux-gnu/release/openobserve
          retention-days: 1
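  # Each matrix entry below is one e2e shard: testfolder is the logical shard name (used
  # in the job title and artifact names) and run_files lists the spec files the shard
  # executes. The Logs-* and Dashboards-* shards are mapped back to the Logs/ and
  # Dashboards/ directories inside the run step.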
  ui_integration_tests:
    name: e2e / ${{ matrix.testfolder }}
    needs: [build_binary]
    runs-on:
      labels: repo-openobserve-standard-8
    # container:
    #   image: mcr.microsoft.com/playwright:v1.50.0-jammy
    #   options: --user root
    strategy:
      matrix:
        include:
          - testfolder: "GeneralTests"
            browser: "chrome"
            run_files:
              [
                "sanity.spec.js",
                "changeOrg.spec.js",
                "enrichment.spec.js",
                "schema.spec.js",
                "schemaload.spec.js",
                "serviceAccount.spec.js",
                "usersOrg.spec.js",
                "org.spec.js",
                "theme-management.spec.js",
                "ingestion-config.spec.js",
              ]
          - testfolder: "Logs-Core"
            browser: "chrome"
            run_files:
              [
                "join.spec.js",
                "logshistogram.spec.js",
                "logspage.spec.js",
                "logsquickmode.spec.js",
                "pagination.spec.js",
                "unflattened.spec.js",
                "logstable.spec.js",
                "shareLink.spec.js",
              ]
          - testfolder: "Logs-Queries"
            browser: "chrome"
            run_files:
              [
                "logsDownloads.spec.js",
                "logsqueries.spec.js",
                "secondsPrecisionAdded.spec.js",
                "logsqueries.cte.spec.js",
                "logsqueries.matchall.spec.js",
                "searchpartition.spec.js",
              ]
          - testfolder: "Alerts"
            browser: "chrome"
            run_files:
              [
                "alerts-e2e-flow.spec.js",
                "alerts-ui-operations.spec.js",
                "alerts-import.spec.js",
              ]
          - testfolder: "Dashboards-Core"
            browser: "chrome"
            run_files:
              [
                "dashboard.spec.js",
                "dashboard2.spec.js",
                "dashboardtype.spec.js",
                "dashboard-folder.spec.js",
                "dashboard-import.spec.js",
                "maxquery.spec.js",
              ]
          - testfolder: "Dashboards-Settings"
            browser: "chrome"
            run_files:
              [
                "dashboard-filter.spec.js",
                "dashboard-general-setting.spec.js",
                "dashboard-tabs-setting.spec.js",
                "dashboard-variables-setting.spec.js",
                "dashboard-transpose.spec.js",
              ]
          - testfolder: "Dashboards-Charts"
            browser: "chrome"
            run_files: [
                "custom-charts.spec.js",
                "dashboard-geoMap.spec.js",
                "dashboard-maps.spec.js",
                "dashboard-multi-y-axis.spec.js",
                "dashboard-html-chart.spec.js",
                "visualize.spec.js",
                # "visualization-vrl.spec.js"
              ]
          - testfolder: "Dashboards-Streaming"
            browser: "chrome"
            run_files: ["dashboard-streaming.spec.js"]
          - testfolder: "Pipelines"
            browser: "chrome"
            run_files: [
                "pipeline-conditions.spec.js",
                "pipeline-conditions-validation.spec.js",
                "pipelines.spec.js",
                "pipeline-dynamic.spec.js",
                "pipeline-core.spec.js",
                # "remotepipeline.spec.js"
              ]
          - testfolder: "Reports"
            browser: "chrome"
            run_files: [
                "reportsScheduleNow.spec.js",
                "reportsScheduleLater.spec.js"
              ]
          - testfolder: "Streams"
            browser: "chrome"
            run_files:
              [
                "multiselect-stream.spec.js",
                "streamname.spec.js",
                "streaming.spec.js",
                "stream-settings.spec.js",
              ]
          - testfolder: "RegressionSet"
            browser: "chrome"
            run_files: ["logs-regression.spec.js", "streams-regression.spec.js"]
    steps:
      - name: Clone the current repo
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Download artifacts
        uses: actions/download-artifact@v5
        with:
          name: release-binary
          path: release-binary
      - name: Start OpenObserve
        run: chmod +x ./release-binary/openobserve && ./release-binary/openobserve > o2.log 2>&1 &
      - name: Wait for start
        run: sleep 60
      - name: Ensure we are getting a reply from the server
        run: curl http://localhost:5080/web/login
      - name: Create coverage json folder
        run: mkdir -p tests/ui-testing/.nyc_output
      - uses: actions/setup-node@v5
        with:
          node-version: 22
      - name: Check if this is a rerun
        id: check_rerun
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          echo "::notice::GitHub run attempt: ${{ github.run_attempt }}"
          if [ "${{ github.run_attempt }}" -gt "1" ]; then
            # This is a rerun - always try TestDino optimization
            echo "is_rerun=true" >> $GITHUB_OUTPUT
            echo "::notice::This is a rerun (attempt ${{ github.run_attempt }}). Will attempt to run only failed tests."
            # Additionally detect if this is "re-run all" vs "re-run failed" for better handling
            echo "::group::Detecting rerun type (all jobs vs failed only)"
            CURRENT_ATTEMPT=${{ github.run_attempt }}
            # Get all jobs from the run
            ALL_JOBS_RESPONSE=$(curl -s -H "Authorization: token $GH_TOKEN" \
              -H "Accept: application/vnd.github.v3+json" \
              "https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/jobs?filter=all&per_page=100")
            # Count jobs that are NOT completed (actually running in this attempt)
            CURRENT_ATTEMPT_JOBS=$(echo "$ALL_JOBS_RESPONSE" | jq -r "[.jobs[] | select(.name | startswith(\"e2e /\")) | select(.run_attempt == $CURRENT_ATTEMPT) | select(.status != \"completed\")] | length")
            # Count total unique matrix job names (from attempt 1)
            TOTAL_MATRIX_JOBS=$(echo "$ALL_JOBS_RESPONSE" | jq -r "[.jobs[] | select(.name | startswith(\"e2e /\")) | select(.run_attempt == 1) | .name] | unique | length")
            echo "::notice::Total unique matrix job names: $TOTAL_MATRIX_JOBS"
            echo "::notice::Jobs running in current attempt $CURRENT_ATTEMPT: $CURRENT_ATTEMPT_JOBS"
            # Store rerun type for potential use
            if [ "$CURRENT_ATTEMPT_JOBS" -eq "$TOTAL_MATRIX_JOBS" ]; then
              echo "::notice::✅ DETECTED: Re-run ALL jobs (all $TOTAL_MATRIX_JOBS matrix jobs running)"
              echo "rerun_type=all" >> $GITHUB_OUTPUT
            else
              echo "::notice::✅ DETECTED: Re-run FAILED jobs only ($CURRENT_ATTEMPT_JOBS out of $TOTAL_MATRIX_JOBS jobs)"
              echo "rerun_type=failed" >> $GITHUB_OUTPUT
            fi
            echo "::endgroup::"
          else
            echo "is_rerun=false" >> $GITHUB_OUTPUT
            echo "rerun_type=none" >> $GITHUB_OUTPUT
            echo "::notice::This is the first run. Will run all tests in matrix."
          fi
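      # Rerun handling for the step below: first runs and "re-run all jobs" execute every
      # spec in the shard; "re-run failed jobs" asks TestDino (tdpw last-failed) for the
      # failed tests and runs only the ones belonging to this shard. If TestDino has
      # nothing for this shard, the previous attempt's job conclusion decides: cancelled,
      # failed, or unknown reruns the full shard, while success exits early.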
      - name: Install dependencies and run ui-tests
        env:
          TESTDINO_TOKEN: ${{ secrets.TESTDINO_API_TOKEN }}
          GH_TOKEN: ${{ github.token }}
        run: |
          touch .env
          echo "ZO_ROOT_USER_EMAIL=${ZO_ROOT_USER_EMAIL}" >> .env
          echo "ZO_ROOT_USER_PASSWORD=${ZO_ROOT_USER_PASSWORD}" >> .env
          echo "ZO_BASE_URL=${ZO_BASE_URL}" >> .env
          echo "WS_ZO_BASE_URL=${WS_ZO_BASE_URL}" >> .env
          echo "ZO_BASE_URL_SC=${ZO_BASE_URL_SC}" >> .env
          echo "ZO_BASE_URL_SC_UI=${ZO_BASE_URL_SC_UI}" >> .env
          echo "INGESTION_URL=${INGESTION_URL}" >> .env
          echo "ORGNAME=${ORGNAME}" >> .env
          echo "ZO_SMTP_ENABLED=${ZO_SMTP_ENABLED}" >> .env
          mv .env tests/ui-testing
          cd tests/ui-testing && npm ci
          npx playwright install --with-deps chromium
          # Check if this is a rerun
          IS_RERUN="${{ steps.check_rerun.outputs.is_rerun }}"
          RERUN_TYPE="${{ steps.check_rerun.outputs.rerun_type }}"
          if [ "$IS_RERUN" = "true" ]; then
            # Check if this is "re-run all" - if so, skip TestDino and run all tests
            if [ "$RERUN_TYPE" = "all" ]; then
              echo "::notice::🔄 Re-run ALL jobs detected - skipping TestDino optimization"
              echo "::notice::Will run ALL tests in this shard regardless of previous failures"
              RERUN_MODE="normal"
              EXTRA_PW_FLAGS=""
            else
              # This is "re-run failed" - use TestDino optimization
              echo "::group::Fetching last failed tests from TestDino"
              # Use custom cache ID that includes run_id to distinguish separate workflow runs
              # Format: gh_openobserve_main_12345678 (for main) or gh_openobserve_feature-branch_12345678 (for PRs)
              # Sanitize branch name by replacing slashes with underscores
              BRANCH_NAME=$(echo "${{ github.ref_name }}" | sed 's/\//_/g')
              CACHE_ID="gh_openobserve_${BRANCH_NAME}_${{ github.run_id }}"
              echo "::notice::Using cache ID: $CACHE_ID"
              # Try to get last failed tests from TestDino
              # Use a temp file to avoid shell variable truncation for large output
              TEMP_OUTPUT="/tmp/testdino_output_$$.txt"
              if npx tdpw last-failed --cache-id "$CACHE_ID" > "$TEMP_OUTPUT" 2>&1; then
                # Filter out ALL lines that don't match test file pattern
                # Valid format: "FolderName/testfile.spec.js" or "FolderName/testfile.spec.js -g test name"
                # This filters out debug lines, empty lines, and anything that's not a test path
                LAST_FAILED_FLAGS="$(grep -E '^[A-Za-z_-]+/[a-zA-Z0-9_-]+\.spec\.js' "$TEMP_OUTPUT" | tail -1)"
                rm -f "$TEMP_OUTPUT"
                if [ -n "$LAST_FAILED_FLAGS" ] && [ "$LAST_FAILED_FLAGS" != "No failed tests found" ]; then
                  echo "::notice::✅ Found failed tests from previous run."
                  echo "::notice::TestDino flags: $LAST_FAILED_FLAGS"
                  RERUN_MODE="optimized"
                  EXTRA_PW_FLAGS="$LAST_FAILED_FLAGS"
                else
                  echo "::warning::TestDino returned no failed tests. Falling back to running all tests in this matrix job."
                  echo "::warning::This means you'll be billed for all tests. Please investigate why last-failed returned empty."
                  RERUN_MODE="fallback"
                  EXTRA_PW_FLAGS=""
                fi
              else
                echo "::error::Failed to fetch last failed tests from TestDino (exit code: $?)."
                echo "::warning::Falling back to running all tests in this matrix job."
                echo "::warning::This means you'll be billed for all tests. Please check TestDino integration."
                RERUN_MODE="fallback"
                EXTRA_PW_FLAGS=""
              fi
              echo "::endgroup::"
            fi
          else
            RERUN_MODE="normal"
            EXTRA_PW_FLAGS=""
          fi
          # Get list of files to run using join function
          FILE_LIST="${{ join(matrix.run_files, ' ') }}"
          echo "DEBUG: FILE_LIST = $FILE_LIST"
          echo "DEBUG: matrix.testfolder = ${{ matrix.testfolder }}"
          if [ -n "$FILE_LIST" ]; then
            # Map logical folder names to actual directories
            case "${{ matrix.testfolder }}" in
              "Logs-Core"|"Logs-Queries")
                ACTUAL_FOLDER="Logs"
                ;;
              "Dashboards-Core"|"Dashboards-Settings"|"Dashboards-Charts"|"Dashboards-Streaming")
                ACTUAL_FOLDER="Dashboards"
                ;;
              *)
                ACTUAL_FOLDER="${{ matrix.testfolder }}"
                ;;
            esac
            echo "DEBUG: ACTUAL_FOLDER = $ACTUAL_FOLDER"
            # Build file paths
            FILE_PATHS=""
            for file in $FILE_LIST; do
              FILE_PATH="./playwright-tests/$ACTUAL_FOLDER/$file"
              echo "DEBUG: Will run file: $FILE_PATH"
              FILE_PATHS="$FILE_PATHS $FILE_PATH"
            done
            # Determine what to run based on rerun mode
            if [ "$RERUN_MODE" = "optimized" ]; then
              # TestDino returns ALL failed tests from the previous run
              # Format examples:
              #   Single: "SDR/test.spec.js -g test name"
              #   Multiple: "Logs/test1.spec.js -g name1 Pipelines/test2.spec.js -g name2"
              echo "DEBUG: Full TestDino output (all failed tests): $EXTRA_PW_FLAGS"
              echo "DEBUG: Current shard folder: $ACTUAL_FOLDER"
              # Strategy: Extract only tests that start with "$ACTUAL_FOLDER/"
              # Use grep to find lines matching our shard folder
              SHARD_TESTS=""
              # Check if the TestDino output contains tests for this shard (case-insensitive)
              # Look for pattern: $ACTUAL_FOLDER/something.spec.js
              if echo "$EXTRA_PW_FLAGS" | grep -iq "$ACTUAL_FOLDER/.*\.spec\.js"; then
                echo "DEBUG: Found test(s) for this shard in TestDino output"
                # IMPORTANT: Based on observation, TestDino returns ONE test at a time
                # Format: "Folder/file.spec.js -g test name"
                # Simply check if it starts with our folder and pass it through
                # Extract file path (everything before -g or end of string) - case-insensitive
                TEST_FILE=$(echo "$EXTRA_PW_FLAGS" | grep -ioE "$ACTUAL_FOLDER/[^ ]+\.spec\.js" | head -1)
                if [ -n "$TEST_FILE" ]; then
                  # Check if file actually starts with our folder (case-insensitive)
                  if [[ "${TEST_FILE,,}" == "${ACTUAL_FOLDER,,}/"* ]]; then
                    # Extract just the filename from the test file path
                    TEST_FILENAME=$(basename "$TEST_FILE")
                    # Check if this file is actually in the current shard's run_files list
                    if echo "$FILE_LIST" | grep -qw "$TEST_FILENAME"; then
                      # Parse out the file path and test name separately for proper quoting
                      # Extract file path (before -g)
                      TEST_FILE_PATH=$(echo "$EXTRA_PW_FLAGS" | sed "s/ -g.*//" | sed "s|^|./playwright-tests/|")
                      # Check if there's a -g flag
                      if echo "$EXTRA_PW_FLAGS" | grep -q " -g "; then
                        # Extract test name (after -g) and strip any surrounding quotes
                        TEST_NAME=$(echo "$EXTRA_PW_FLAGS" | sed 's/.*-g //' | sed 's/^"\(.*\)"$/\1/')
                        echo "DEBUG: File path: $TEST_FILE_PATH"
                        echo "DEBUG: Test name (unquoted): $TEST_NAME"
                        echo "::notice::🎯 OPTIMIZED RERUN: Running failed test from this shard"
                        echo "DEBUG: Final command: npx playwright test \"$TEST_FILE_PATH\" -g \"$TEST_NAME\""
                        # Properly quote the test name to handle spaces and special chars
                        npx playwright test "$TEST_FILE_PATH" -g "$TEST_NAME"
                      else
                        # No -g flag, just run the file
                        echo "DEBUG: File path: $TEST_FILE_PATH (no -g flag)"
                        echo "::notice::🎯 OPTIMIZED RERUN: Running failed test file from this shard"
                        echo "DEBUG: Final command: npx playwright test \"$TEST_FILE_PATH\""
                        npx playwright test "$TEST_FILE_PATH"
                      fi
                    else
                      echo "::notice::⏭️ Test file '$TEST_FILENAME' not in this shard's run_files list."
                      echo "::notice::Checking if this shard was cancelled/abruptly ended in previous attempt..."
                      # Check the status of this matrix job in the previous attempt
                      PREV_ATTEMPT=$((${{ github.run_attempt }} - 1))
                      if [ "$PREV_ATTEMPT" -ge 1 ]; then
                        JOB_NAME="e2e / ${{ matrix.testfolder }}"
                        API_RESPONSE=$(curl -s -H "Authorization: token $GH_TOKEN" \
                          -H "Accept: application/vnd.github.v3+json" \
                          "https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/$PREV_ATTEMPT/jobs" 2>/dev/null)
                        if [ $? -eq 0 ] && [ -n "$API_RESPONSE" ]; then
                          PREV_JOB_STATUS=$(echo "$API_RESPONSE" | jq -r ".jobs[] | select(.name == \"$JOB_NAME\") | .conclusion" 2>/dev/null)
                          if [ "$PREV_JOB_STATUS" = "cancelled" ] || [ "$PREV_JOB_STATUS" = "null" ] || [ -z "$PREV_JOB_STATUS" ]; then
                            echo "::warning::⚠️ Previous shard was cancelled/abruptly ended - running FULL test suite"
                            npx playwright test $FILE_PATHS
                            exit $?
                          elif [ "$PREV_JOB_STATUS" = "success" ]; then
                            echo "::notice::✅ Previous shard completed successfully - no tests to rerun"
                            exit 0
                          elif [ "$PREV_JOB_STATUS" = "failure" ]; then
                            echo "::warning::⚠️ Previous shard failed but TestDino has no failures recorded - running FULL test suite"
                            npx playwright test $FILE_PATHS
                            exit $?
                          else
                            echo "::warning::⚠️ Unknown job status - running FULL test suite for safety"
                            npx playwright test $FILE_PATHS
                            exit $?
                          fi
                        else
                          echo "::error::Failed to fetch job status from GitHub API - running FULL test suite"
                          npx playwright test $FILE_PATHS
                          exit $?
                        fi
                      else
                        echo "::notice::✅ This is the first run, no previous attempt to check"
                        echo "::notice::No failed tests to rerun. Exiting successfully."
                        exit 0
                      fi
                    fi
                  else
                    echo "::notice::⏭️ Test file doesn't match shard folder exactly. Skipping."
                    exit 0
                  fi
                else
                  echo "::warning::Failed to extract test file path. Falling back to skip."
                  exit 0
                fi
              else
                echo "::notice::⏭️ OPTIMIZED RERUN: No failed tests found for '$ACTUAL_FOLDER' shard in TestDino output"
                echo "::notice::Checking if this shard was cancelled/abruptly ended in previous attempt..."
                # Check the status of this matrix job in the previous attempt
                # Rule: If shard was cancelled/abruptly ended → treat as failed → run full test suite
                PREV_ATTEMPT=$((${{ github.run_attempt }} - 1))
                if [ "$PREV_ATTEMPT" -ge 1 ]; then
                  # Job name format: "e2e / {matrix.testfolder}"
                  JOB_NAME="e2e / ${{ matrix.testfolder }}"
                  echo "DEBUG: Checking previous attempt #$PREV_ATTEMPT for job: $JOB_NAME"
                  # Use GitHub REST API to get job status from specific attempt
                  # This is guaranteed to work and provides attempt-specific data
                  API_RESPONSE=$(curl -s -H "Authorization: token $GH_TOKEN" \
                    -H "Accept: application/vnd.github.v3+json" \
                    "https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/$PREV_ATTEMPT/jobs" 2>/dev/null)
                  if [ $? -eq 0 ] && [ -n "$API_RESPONSE" ]; then
                    # Successfully got API response, parse it
                    PREV_JOB_STATUS=$(echo "$API_RESPONSE" | jq -r ".jobs[] | select(.name == \"$JOB_NAME\") | .conclusion" 2>/dev/null)
                    echo "DEBUG: Previous job status: $PREV_JOB_STATUS"
                    if [ "$PREV_JOB_STATUS" = "cancelled" ] || [ "$PREV_JOB_STATUS" = "null" ] || [ -z "$PREV_JOB_STATUS" ]; then
                      echo "::warning::⚠️ Previous shard was cancelled/abruptly ended (status: ${PREV_JOB_STATUS:-not found})"
                      echo "::warning::Treating as FAILED - running FULL test suite for this shard"
                      echo "DEBUG: Running all matrix tests: npx playwright test $FILE_PATHS"
                      npx playwright test $FILE_PATHS
                      exit $?
                    elif [ "$PREV_JOB_STATUS" = "success" ]; then
                      echo "::notice::✅ Previous shard completed successfully - all tests passed"
                      echo "::notice::No failed tests to rerun. Exiting successfully."
                      exit 0
                    elif [ "$PREV_JOB_STATUS" = "failure" ]; then
                      echo "::warning::⚠️ Previous shard failed but TestDino has no failures recorded"
                      echo "::warning::This shouldn't happen - running FULL test suite for safety"
                      echo "DEBUG: Running all matrix tests: npx playwright test $FILE_PATHS"
                      npx playwright test $FILE_PATHS
                      exit $?
                    else
                      echo "::warning::⚠️ Unknown job status: $PREV_JOB_STATUS"
                      echo "::warning::Running FULL test suite for safety"
                      echo "DEBUG: Running all matrix tests: npx playwright test $FILE_PATHS"
                      npx playwright test $FILE_PATHS
                      exit $?
                    fi
                  else
                    echo "::error::Failed to fetch job status from GitHub API"
                    echo "::warning::Falling back to running FULL test suite for safety"
                    echo "DEBUG: Running all matrix tests: npx playwright test $FILE_PATHS"
                    npx playwright test $FILE_PATHS
                    exit $?
                  fi
                else
                  echo "::notice::✅ This is the first run, no previous attempt to check"
                  echo "::notice::No failed tests to rerun. Exiting successfully."
                  exit 0
                fi
              fi
            else
              # Normal run or fallback
              if [ "$RERUN_MODE" = "fallback" ]; then
                echo "::warning::⚠️ FALLBACK RERUN: Running all tests in matrix (TestDino optimization failed)"
              fi
              echo "DEBUG: Final command: npx playwright test $FILE_PATHS"
              npx playwright test $FILE_PATHS
            fi
          else
            echo "No files specified to run for ${{ matrix.testfolder }} folder"
          fi
      - name: Sanitize Test Folder Name
        shell: bash
        run: |
          # Assign the matrix variable to a Bash variable
          SANITIZED_FOLDERNAME="${{ matrix.testfolder }}"
          # Export as a GitHub Actions environment variable
          echo "SANITIZED_FOLDERNAME=$SANITIZED_FOLDERNAME" >> $GITHUB_ENV
      - name: Upload Coverage Data
        uses: actions/upload-artifact@v4
        with:
          name: playwright-coverage-${{ env.SANITIZED_FOLDERNAME }}-attempt-${{ github.run_attempt }}
          path: tests/ui-testing/.nyc_output
          include-hidden-files: true
      - name: Generate Coverage Report
        run: cd tests/ui-testing && npx nyc report
      - name: Upload blob report to GitHub Actions Artifacts
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v4
        with:
          name: blob-report-${{ matrix.testfolder }}-attempt-${{ github.run_attempt }}
          path: tests/ui-testing/blob-report
          retention-days: 1
      - name: Check OpenObserve logs
        if: always()
        run: cat o2.log
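  # Merges the per-shard Playwright blob reports into a single html/json report, caches
  # failure metadata to TestDino for the last-failed rerun optimization, and optionally
  # uploads the full report when UPLOAD_TO_TESTDINO is enabled.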
  merge_and_upload_reports:
    name: Merge Reports and Upload to TestDino
    needs: [ui_integration_tests]
    if: ${{ !cancelled() && needs.ui_integration_tests.result != 'skipped' }}
    runs-on:
      labels: repo-openobserve-standard-8
    steps:
      - name: Clone the current repo
        uses: actions/checkout@v5
      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: '22'
      - name: Install project dependencies
        run: cd tests/ui-testing && npm ci
      - name: Download blob reports from GitHub Actions Artifacts
        uses: actions/download-artifact@v4
        continue-on-error: true
        id: download_blobs
        with:
          path: tests/ui-testing/all-blob-reports
          pattern: blob-report-*-attempt-${{ github.run_attempt }}
          merge-multiple: true
      - name: Check if any artifacts were downloaded
        run: |
          if [ -d "tests/ui-testing/all-blob-reports" ] && [ "$(ls -A tests/ui-testing/all-blob-reports)" ]; then
            echo "✅ Artifacts downloaded successfully"
            ls -la tests/ui-testing/all-blob-reports
          else
            echo "⚠️ No artifacts found or download failed completely"
            # Don't fail here - let the Merge Reports step handle this
          fi
      - name: Merge Reports
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          cd tests/ui-testing
          # Check if directory exists and contains reports
          if [ ! -d "all-blob-reports" ] || [ -z "$(ls -A all-blob-reports 2>/dev/null)" ]; then
            echo "::warning::No blob reports found to merge."
            # Investigate: Check if all test jobs succeeded (meaning they skipped intentionally)
            # vs test jobs failed (meaning blob reporter might have failed)
            echo "::group::Investigating why no blob reports exist"
            JOBS_RESPONSE=$(curl -s -H "Authorization: token $GH_TOKEN" \
              -H "Accept: application/vnd.github.v3+json" \
              "https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }}/jobs")
            # Count test matrix jobs by conclusion
            SUCCEEDED=$(echo "$JOBS_RESPONSE" | jq -r '[.jobs[] | select(.name | startswith("e2e /")) | select(.conclusion == "success")] | length')
            FAILED=$(echo "$JOBS_RESPONSE" | jq -r '[.jobs[] | select(.name | startswith("e2e /")) | select(.conclusion == "failure")] | length')
            TOTAL=$(echo "$JOBS_RESPONSE" | jq -r '[.jobs[] | select(.name | startswith("e2e /"))] | length')
            echo "::notice::Test job results: $SUCCEEDED succeeded, $FAILED failed, $TOTAL total"
            if [ "$SUCCEEDED" -gt 0 ] && [ "$FAILED" -eq 0 ]; then
              echo "::notice::✅ All test jobs succeeded - tests likely skipped because they already passed"
              echo "::notice::Skipping merge and upload (no reports to process)"
              echo "::endgroup::"
              exit 0
            else
              echo "::error::❌ Some/all test jobs failed but no blob reports were found"
              echo "::error::This indicates a problem with test execution or blob reporter"
              echo "::error::Failed jobs: $FAILED, Succeeded jobs: $SUCCEEDED"
              echo "::endgroup::"
              exit 1
            fi
          fi
          # Unset CI to use non-CI reporter config (html + json instead of blob)
          unset CI
          npx playwright merge-reports --config playwright.config.js ./all-blob-reports
          echo "Contents of playwright-results:"
          ls -la playwright-results/
          echo "Contents of html-report:"
          ls -la playwright-results/html-report/
          echo "Checking report.json:"
          ls -lh playwright-results/report.json
      - name: Cache test failures to TestDino
        if: always()
        env:
          TESTDINO_TOKEN: ${{ secrets.TESTDINO_API_TOKEN }}
        run: |
          cd tests/ui-testing
          echo "::group::Caching test failure metadata to TestDino"
          if [ -f "playwright-results/report.json" ]; then
            echo "::notice::JSON report found at playwright-results/report.json"
            ls -lh playwright-results/report.json
            # Copy report.json to repo root to ensure correct git metadata
            mkdir -p ../../playwright-results
            cp playwright-results/report.json ../../playwright-results/
            # Run tdpw cache from repo root directory (for correct git metadata)
            cd ../../playwright-results
            # Use custom cache ID that includes run_id to distinguish separate workflow runs
            # Format: gh_openobserve_main_12345678 (for main) or gh_openobserve_feature-branch_12345678 (for PRs)
            # Sanitize branch name by replacing slashes with underscores
            BRANCH_NAME=$(echo "${{ github.ref_name }}" | sed 's/\//_/g')
            CACHE_ID="gh_openobserve_${BRANCH_NAME}_${{ github.run_id }}"
            echo "::notice::Using cache ID: $CACHE_ID"
            # Suppress verbose debug output from TestDino, only show result
            if CACHE_OUTPUT=$(npx tdpw cache --cache-id "$CACHE_ID" 2>&1); then
              # Extract only the success message, filter out debug spam
              SUCCESS_MSG=$(echo "$CACHE_OUTPUT" | grep "Cache data submitted successfully" || echo "")
              if [ -n "$SUCCESS_MSG" ]; then
                echo "::notice::✅ Successfully cached test failure metadata to TestDino cloud"
              else
                echo "::notice::✅ Cache command completed"
              fi
            else
              echo "::warning::⚠️ Failed to cache test metadata to TestDino"
              echo "::warning::Rerun optimization may not work properly"
              # Show output only on failure for debugging
              echo "$CACHE_OUTPUT"
            fi
          else
            echo "::error::JSON report not found at playwright-results/report.json"
            echo "::warning::Cannot cache test failures - skipping"
          fi
          echo "::endgroup::"
      - name: Upload to TestDino
        if: ${{ (success() || failure()) && env.UPLOAD_TO_TESTDINO == 'true' }}
        run: |
          cd tests/ui-testing
          # Fix startTime format: convert Unix timestamp (number) to ISO 8601 string for TestDino compatibility
          if [ -f "playwright-results/report.json" ]; then
            # Check if startTime is a number and convert it
            START_TIME=$(jq -r '.stats.startTime // empty' playwright-results/report.json)
            if [ -n "$START_TIME" ] && [[ "$START_TIME" =~ ^[0-9]+$ ]]; then
              echo "Converting startTime from Unix timestamp to ISO 8601 string"
              jq '.stats.startTime = (.stats.startTime / 1000 | strftime("%Y-%m-%dT%H:%M:%S.000Z"))' playwright-results/report.json > playwright-results/report.json.tmp
              mv playwright-results/report.json.tmp playwright-results/report.json
            fi
          fi
          npx --yes tdpw@latest upload playwright-results \
            --json-report playwright-results/report.json \
            --html-report playwright-results/html-report \
            --upload-html \
            --verbose \
            --token="${{ secrets.TESTDINO_API_TOKEN }}"
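  # Collects the per-shard nyc coverage artifacts and produces a combined coverage report.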
  generate_coverage_report:
    name: Generate Coverage Report
    needs: [build_binary, ui_integration_tests]
    runs-on: repo-openobserve-standard-8
    if: success() || failure()
    steps:
      - name: Clone the current repo
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Download Playwright Coverage Data
        uses: actions/download-artifact@v5
        with:
          pattern: playwright-coverage-*-attempt-${{ github.run_attempt }}
          path: merged-coverage
          merge-multiple: true
      - name: Verify Downloaded Files
        run: ls -R merged-coverage # Check the structure and contents
      - name: Move Coverage Files to .nyc_output
        run: |
          mkdir -p tests/ui-testing/.nyc_output
          mv merged-coverage/* tests/ui-testing/.nyc_output/ || echo "No files to move"
      - name: Generate Coverage Report
        run: cd tests/ui-testing && npm ci && npx nyc report
      - name: Upload Coverage Report
        uses: actions/upload-artifact@v4
        if: hashFiles('tests/ui-testing/coverage-report/**') != ''
        with:
          name: coverage-report
          path: tests/ui-testing/coverage-report
          retention-days: 7
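  # Single summary job: passes only when every upstream job either succeeded or was skipped.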
  playwright_summary:
    runs-on:
      labels: repo-openobserve-standard-8
    permissions: {}
    needs: [build_binary, ui_integration_tests, merge_and_upload_reports, generate_coverage_report]
    if: always()
    steps:
      - name: Check test results
        run: |
          # Check all job results - accept success or skipped for all jobs
          BUILD_OK=false
          UI_OK=false
          MERGE_OK=false
          COV_OK=false
          if [ "${{ needs.build_binary.result }}" == "success" ] || [ "${{ needs.build_binary.result }}" == "skipped" ]; then
            BUILD_OK=true
          fi
          if [ "${{ needs.ui_integration_tests.result }}" == "success" ] || [ "${{ needs.ui_integration_tests.result }}" == "skipped" ]; then
            UI_OK=true
          fi
          if [ "${{ needs.merge_and_upload_reports.result }}" == "success" ] || [ "${{ needs.merge_and_upload_reports.result }}" == "skipped" ]; then
            MERGE_OK=true
          fi
          if [ "${{ needs.generate_coverage_report.result }}" == "success" ] || [ "${{ needs.generate_coverage_report.result }}" == "skipped" ]; then
            COV_OK=true
          fi
          if [ "$BUILD_OK" == "true" ] && [ "$UI_OK" == "true" ] && [ "$MERGE_OK" == "true" ] && [ "$COV_OK" == "true" ]; then
            echo "All Playwright tests completed successfully"
            exit 0
          else
            echo "Playwright tests failed:"
            echo " build_binary: ${{ needs.build_binary.result }}"
            echo " ui_integration_tests: ${{ needs.ui_integration_tests.result }}"
            echo " merge_and_upload_reports: ${{ needs.merge_and_upload_reports.result }}"
            echo " generate_coverage_report: ${{ needs.generate_coverage_report.result }}"
            exit 1
          fi
