Import Cobalt 24.master.0.1032339
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..3b65cb7
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,13 @@
+# coverage.py configuration
+
+[run]
+source=.
+
+omit=
+ # Exclude test files from coverage report
+ *_test.py
+ cobalt/black_box_tests/tests/*
+ cobalt/media_integration_tests/*
+
+ # Exclude scripts in third_party/
+ third_party/*
diff --git a/.github/actions/api_leak_detector/action.yaml b/.github/actions/api_leak_detector/action.yaml
new file mode 100644
index 0000000..bc2d532
--- /dev/null
+++ b/.github/actions/api_leak_detector/action.yaml
@@ -0,0 +1,21 @@
+name: Run API Leak Detector
+description: Runs the API leak detector
+inputs:
+ relative_manifest_path:
+ description: "Path to leak manifest file."
+ required: false
+ default: ""
+
+runs:
+ using: "composite"
+ steps:
+ - name: Run Detector
+ shell: bash
+ run: |
+ set -x
+ env
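+ # Only pass a manifest flag when a relative manifest path was provided for this platform/config.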
+ MANIFEST_FLAG=""
+ if [ "${{ inputs.relative_manifest_path }}" != "" ]; then
+ MANIFEST_FLAG="--relative-manifest-path ${{ matrix.target_platform }}/${{matrix.config}}/${{ inputs.relative_manifest_path }}"
+ fi
+ python3 starboard/tools/api_leak_detector/api_leak_detector.py -p ${{ matrix.target_platform }} -c ${{matrix.config}} --submit-check $MANIFEST_FLAG
diff --git a/.github/actions/build/action.yaml b/.github/actions/build/action.yaml
new file mode 100644
index 0000000..f641013
--- /dev/null
+++ b/.github/actions/build/action.yaml
@@ -0,0 +1,40 @@
+name: Build Cobalt
+description: Builds Cobalt targets
+runs:
+ using: "composite"
+ steps:
+ - name: Set up Cloud SDK
+ if: startsWith(matrix.target_platform, 'android')
+ uses: isarkis/setup-gcloud@40dce7857b354839efac498d3632050f568090b6 # v1.1.1
+ - name: Set Android env vars
+ if: startsWith(matrix.target_platform, 'android')
+ run: |
+ echo "ANDROID_HOME=/root/starboard-toolchains/AndroidSdk/" >> $GITHUB_ENV
+ PROJECT_NAME=$(gcloud config get-value project)
+ echo "GCS_NIGHTLY_PATH=gs://${PROJECT_NAME}-build-artifacts" >> $GITHUB_ENV
+ shell: bash
+ - name: Build
+ run: |
+ set -x
+ env
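+ # When COBALT_BOOTLOADER is set, build the Evergreen loader targets for the bootloader platform instead of the Cobalt targets.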
+ if [ -z ${COBALT_BOOTLOADER+x} ]; then
+ BUILD_PLATFORM=${{ matrix.target_platform }}
+ BUILD_TARGET=all
+ if [[ "${{matrix.config}}" =~ ^(qa|gold)$ ]]; then
+ BUILD_TARGET=default
+ fi
+ else
+ BUILD_PLATFORM=${COBALT_BOOTLOADER}
+ BUILD_TARGET='loader_app_install elf_loader_sandbox_install crashpad_handler_install'
+ fi
+ # GitHub Runners have home set to /github/home.
+ if [ -d /root/starboard-toolchains ]; then
+ ln -s /root/starboard-toolchains /github/home/starboard-toolchains
+ fi
+ # Set Ninja output format
+ export NINJA_STATUS="[%e sec | %f/%t %u remaining | %c/sec | j%r] "
+ ninja -C ${GITHUB_WORKSPACE}/out/${BUILD_PLATFORM}_${{matrix.config}} ${BUILD_TARGET}
+ shell: bash
+ - name: Show Sccache Stats
+ run: sccache -s
+ shell: bash
diff --git a/.github/actions/docker/action.yaml b/.github/actions/docker/action.yaml
new file mode 100644
index 0000000..0c27851
--- /dev/null
+++ b/.github/actions/docker/action.yaml
@@ -0,0 +1,100 @@
+name: Docker Image Build
+description: Builds the docker images used for Cobalt builds.
+inputs:
+ docker_service:
+ description: "Docker compose service."
+ required: true
+ docker_image:
+ description: "Docker image name."
+ required: true
+
+runs:
+ using: "composite"
+ steps:
+ - name: Rename Limit
+ run: git config diff.renameLimit 999999
+ shell: bash
+ - name: Get docker file changes
+ id: changed-files
+ uses: tj-actions/changed-files@8953e851a137075e59e84b5c15fbeb3617e82f15 # v32.1.1
+ with:
+ files_ignore: third_party/**
+ files: |
+ docker-compose.yml
+ docker/linux/**
+ .github/actions/docker/**
+ - name: Retrieve Docker metadata
+ id: meta
+ uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v4.3.0
+ with:
+ images: ${{env.REGISTRY}}/${{github.repository}}/${{inputs.docker_image}}
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=ref,event=pr
+ - name: Set Docker Tag
+ id: set-docker-tag
+ run: |
+ set -x
+ docker_tag="${{ steps.meta.outputs.tags }}"
+ docker_tag="${docker_tag%.1[+,-]}"
+ echo "DOCKER_TAG=${docker_tag}" >> $GITHUB_ENV
+ shell: bash
+ # We need to set the docker tag properly for pull requests. When no docker-related files
+ # were changed, we need to use an existing image (e.g. main). When the docker image is
+ # rebuilt, we have to use the tag generated by the image build.
+ - name: Set Docker Tag
+ id: set-docker-tag-presubmit-non-fork
+ env:
+ REPO: ${{ github.repository }}
+ if: ${{ (steps.changed-files.outputs.any_changed == 'false') && (github.event_name == 'pull_request') }}
+ run: echo "DOCKER_TAG=ghcr.io/${REPO}/${{inputs.docker_image}}:${GITHUB_BASE_REF%.1+}" >> $GITHUB_ENV
+ shell: bash
+ - name: Set up Cloud SDK
+ if: ${{ (steps.changed-files.outputs.any_changed == 'true') && (github.event_name == 'pull_request') && (github.event.pull_request.head.repo.fork) }}
+ uses: isarkis/setup-gcloud@40dce7857b354839efac498d3632050f568090b6 # v1.1.1
+ - name: Set Docker Tag
+ id: set-docker-tag-presubmit-fork
+ env:
+ GITHUB_EVENT_NUMBER: ${{ github.event.number }}
+ if: ${{ (steps.changed-files.outputs.any_changed == 'true') && (github.event_name == 'pull_request') && (github.event.pull_request.head.repo.fork) }}
+ run: |
+ # Need to log in to GCR to be able to push images created by fork-based PR workflows.
+ PROJECT_NAME=$(gcloud config get-value project)
+ METADATA="http://metadata.google.internal./computeMetadata/v1"
+ SVC_ACCT="${METADATA}/instance/service-accounts/default"
+ ACCESS_TOKEN=$(curl -H 'Metadata-Flavor: Google' ${SVC_ACCT}/token | cut -d'"' -f 4)
+ printf ${ACCESS_TOKEN} | docker login -u oauth2accesstoken --password-stdin https://gcr.io
+ echo "DOCKER_TAG=gcr.io/${PROJECT_NAME}/${{inputs.docker_image}}:pr-${GITHUB_EVENT_NUMBER}" >> $GITHUB_ENV
+ shell: bash
+ - name: Process Docker metadata
+ id: process-docker-metadata
+ run: |
+ set -x
+ set +e
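+ # Rebuild the image only if its tag is missing from the registry or docker-related files changed.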
+ docker manifest inspect $DOCKER_TAG > /dev/null
+ if [[ $? -ne 0 || ${{ steps.changed-files.outputs.any_changed }} == 'true' ]]; then
+ echo "need_to_build=true" >> $GITHUB_ENV
+ else
+ echo "need_to_build=false" >> $GITHUB_ENV
+ fi
+ shell: bash
+ - name: Build containers with docker-compose
+ id: build-image
+ if: env.need_to_build == 'true'
+ env:
+ SERVICE: ${{inputs.docker_service}}
+ shell: bash
+ run: |
+ set -xue
+ DOCKER_BUILDKIT=0 docker compose -f docker-compose.yml up --build --no-start "${SERVICE}"
+ - name: Tag images
+ id: tag-images
+ if: env.need_to_build == 'true'
+ run: docker tag ${{inputs.docker_image}} $DOCKER_TAG
+ shell: bash
+ - name: Push images
+ id: push-image
+ if: env.need_to_build == 'true'
+ run: docker push ${DOCKER_TAG}
+ shell: bash
diff --git a/.github/actions/docker_win/action.yaml b/.github/actions/docker_win/action.yaml
new file mode 100644
index 0000000..a082e1e
--- /dev/null
+++ b/.github/actions/docker_win/action.yaml
@@ -0,0 +1,76 @@
+name: Docker Image Build
+description: Builds the docker images used for Cobalt builds.
+inputs:
+ service:
+ description: "Service name from docker compose."
+ required: true
+runs:
+ using: "composite"
+ steps:
+ - name: Rename Limit
+ run: git config diff.renameLimit 999999
+ shell: bash
+ - name: Get docker file changes
+ id: changed-files
+ uses: tj-actions/changed-files@8953e851a137075e59e84b5c15fbeb3617e82f15 # v32.1.1
+ with:
+ files_ignore: third_party/**
+ files: |
+ docker-compose-windows.yml
+ docker/windows/**
+ .github/actions/docker_win/**
+ - name: Retrieve Docker metadata
+ id: meta
+ uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v4.3.0
+ with:
+ images: ${{env.REGISTRY}}/${{github.repository}}/cobalt-${{inputs.service}}
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=ref,event=pr
+ - name: Set Docker Tag
+ run: |
+ set -x
+ docker_tag="${{ steps.meta.outputs.tags }}"
+ docker_tag="${docker_tag%.1[+,-]}"
+ echo "DOCKER_TAG=${docker_tag}" >> $GITHUB_ENV
+ shell: bash
+ # We need to set the docker tag properly for pull requests. When no docker-related files
+ # were changed, we need to use an existing image (e.g. main). When the docker image is
+ # rebuilt, we have to use the tag generated by the image build.
+ - name: Set Docker Tag
+ if: ${{ (steps.changed-files.outputs.any_changed == 'false') && (github.event_name == 'pull_request') }}
+ env:
+ REPO: ${{ github.repository }}
+ run: echo "DOCKER_TAG=ghcr.io/${REPO}/cobalt-${{inputs.service}}:${GITHUB_BASE_REF%.1+}" >> $GITHUB_ENV
+ shell: bash
+ - name: Process Docker metadata
+ id: process-docker
+ run: |
+ set -x
+ set +e
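+ # Rebuild the image only if its tag is missing from the registry or docker-related files changed.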
+ docker manifest inspect $DOCKER_TAG > /dev/null
+ if [[ $? -ne 0 || ${{ steps.changed-files.outputs.any_changed }} == 'true' ]]; then
+ echo "need_to_build=true" >> $GITHUB_ENV
+ else
+ echo "need_to_build=false" >> $GITHUB_ENV
+ fi
+ shell: bash
+ - name: Build containers with docker-compose
+ if: env.need_to_build == 'true'
+ env:
+ DOCKER_CPUS: 2
+ SERVICE: ${{inputs.service}}
+ shell: bash
+ run: |
+ set -xue
+ docker compose -f docker-compose-windows.yml up --no-start "${SERVICE}"
+ - name: Tag images
+ if: ${{ (env.need_to_build == 'true') && ((github.event_name != 'pull_request') || (!github.event.pull_request.head.repo.fork)) }}
+ run: |
+ docker tag cobalt-${{inputs.service}} $DOCKER_TAG
+ shell: bash
+ - name: Push images
+ if: ${{ (env.need_to_build == 'true') && ((github.event_name != 'pull_request') || (!github.event.pull_request.head.repo.fork)) }}
+ run: docker push ${DOCKER_TAG}
+ shell: bash
diff --git a/.github/actions/gn/action.yaml b/.github/actions/gn/action.yaml
new file mode 100644
index 0000000..7afac03
--- /dev/null
+++ b/.github/actions/gn/action.yaml
@@ -0,0 +1,36 @@
+name: GN
+description: Generates and checks GN.
+runs:
+ using: "composite"
+ steps:
+ - name: Configure Environment
+ shell: bash
+ run: |
+ echo "PYTHONPATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+ - name: Set up Cloud SDK
+ if: startsWith(matrix.target_platform, 'android')
+ uses: isarkis/setup-gcloud@40dce7857b354839efac498d3632050f568090b6 # v1.1.1
+ - name: Configure Android Environment
+ shell: bash
+ if: startsWith(matrix.target_platform, 'android')
+ run: |
+ echo "ANDROID_HOME=/root/starboard-toolchains/AndroidSdk/" >> $GITHUB_ENV
+ echo "COBALT_GRADLE_BUILD_COUNT=24" >> $GITHUB_ENV
+ PROJECT_NAME=$(gcloud config get-value project)
+ echo "GCS_NIGHTLY_PATH=gs://${PROJECT_NAME}-build-artifacts" >> $GITHUB_ENV
+ - name: GN
+ run: |
+ set -x
+ extra_arguments="${{matrix.extra_gn_arguments}}"
+ if [ -z ${COBALT_BOOTLOADER+x} ]; then
+ BUILD_PLATFORM=${{ matrix.target_platform }}
+ else
+ BUILD_PLATFORM=${COBALT_BOOTLOADER}
+ if [ ! -z "${{matrix.bootloader_extra_gn_arguments}}" ]
+ then
+ extra_arguments="${{matrix.bootloader_extra_gn_arguments}}"
+ fi
+ fi
+ gn gen $GITHUB_WORKSPACE/out/${BUILD_PLATFORM}_${{matrix.config}} --args="target_platform=\"${BUILD_PLATFORM}\" ${{matrix.sb_api_version}} ${{matrix.target_os}} ${{matrix.target_cpu}} ${extra_arguments} is_internal_build=false build_type=\"${{matrix.config}}\""
+ gn check $GITHUB_WORKSPACE/out/${BUILD_PLATFORM}_${{ matrix.config }}
+ shell: bash
diff --git a/.github/actions/on_device_tests/action.yaml b/.github/actions/on_device_tests/action.yaml
new file mode 100644
index 0000000..d07f984
--- /dev/null
+++ b/.github/actions/on_device_tests/action.yaml
@@ -0,0 +1,102 @@
+name: On Device Test
+description: Runs on-device tests.
+
+runs:
+ using: "composite"
+ steps:
+ - name: Install requirements
+ run: |
+ pip3 install grpcio==1.38.0 grpcio-tools==1.38.0
+ shell: bash
+ - name: Generate gRPC files
+ run: |
+ python -m grpc_tools.protoc -Itools/ --python_out=tools/ --grpc_python_out=tools/ tools/on_device_tests_gateway.proto
+ shell: bash
+ - name: Set up Cloud SDK
+ uses: isarkis/setup-gcloud@40dce7857b354839efac498d3632050f568090b6 # v1.1.1
+ - name: Set env vars
+ env:
+ WORKFLOW: ${{ github.workflow }}
+ run: |
+ echo "PROJECT_NAME=$(gcloud config get-value project)" >> $GITHUB_ENV
+ echo "GITHUB_RUN_NUMBER=${GITHUB_RUN_NUMBER}" >> $GITHUB_ENV
+ echo "WORKFLOW=${WORKFLOW}" >> $GITHUB_ENV
+
+ # Boot loader env
+ if [ "${COBALT_BOOTLOADER}" != "null" ]; then
+ echo "LOADER_CONFIG=${{ matrix.config }}" >> $GITHUB_ENV
+ echo "LOADER_PLATFORM=${COBALT_BOOTLOADER}" >> $GITHUB_ENV
+ fi
+
+ # Dimension env
+ if [ "${{ matrix.dimension }}" != "null" ]; then
+ echo "DIMENSION=${{ matrix.dimension }}" >> $GITHUB_ENV
+ fi
+
+ # Shard env
+ if [[ "${{matrix.shard}}" == 'black_box_test' || "${{matrix.shard}}" == 'evergreen_test' || "${{matrix.shard}}" == 'unit_test' ]]; then
+ echo "SHARD_NAME=${{ matrix.shard }}" >> $GITHUB_ENV
+ echo "TEST_TYPE=${{ matrix.shard }}" >> $GITHUB_ENV
+ else
+ echo "SHARD_NAME=unit_test_${{ matrix.shard }}" >> $GITHUB_ENV
+ echo "TEST_TYPE=unit_test" >> $GITHUB_ENV
+ echo "USE_SHARDING=1" >> $GITHUB_ENV
+ fi
+ shell: bash
+ - name: trigger ${{ env.SHARD_NAME }} tests on ${{ matrix.platform }} platform
+ env:
+ GITHUB_SHA: ${{ github.sha }}
+ GITHUB_TOKEN: ${{ github.token }}
+ GITHUB_PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
+ GITHUB_EVENT_NAME: ${{ github.event_name }}
+ GITHUB_ACTOR: ${{ github.actor }}
+ GITHUB_TRIGGERING_ACTOR: ${{ github.triggering_actor }}
+ GITHUB_ACTOR_ID: ${{ github.actor_id }}
+ GITHUB_REPO: ${{ github.repository }}
+ GITHUB_PR_HEAD_USER_LOGIN: ${{ github.event.pull_request.head.user.login }}
+ GITHUB_PR_HEAD_USER_ID: ${{ github.event.pull_request.head.user.id }}
+ GITHUB_COMMIT_AUTHOR_USERNAME: ${{ github.event.commits[0].author.username }}
+ GITHUB_COMMIT_AUTHOR_EMAIL: ${{ github.event.commits[0].author.email }}
+ run: |
+ set -uxe
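+ # Trigger a remote test session and capture its ID for the watch step below.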
+ SESSION_ID=$(
+ python3 tools/on_device_tests_gateway_client.py \
+ --token ${GITHUB_TOKEN} \
+ --change_id "${GITHUB_PR_HEAD_SHA:-$GITHUB_SHA}" \
+ trigger \
+ --test_type ${{ env.TEST_TYPE }} \
+ --platform ${{ matrix.target_platform }} \
+ --config ${{ matrix.config }} \
+ --tag cobalt_github_${GITHUB_EVENT_NAME} \
+ --builder_name github_${{ matrix.platform }}_tests \
+ --build_number ${GITHUB_RUN_NUMBER} \
+ ${LOADER_PLATFORM:+"--loader_config" "$LOADER_CONFIG"} \
+ ${LOADER_PLATFORM:+"--loader_platform" "$LOADER_PLATFORM"} \
+ ${DIMENSION:+"--dimension" "$DIMENSION"} \
+ ${USE_SHARDING:+"--unittest_shard_index" "${{ matrix.shard }}"} \
+ ${ON_DEVICE_TEST_ATTEMPTS:+"--test_attempts" "$ON_DEVICE_TEST_ATTEMPTS"} \
+ --archive_path gs://${PROJECT_NAME}-test-artifacts/${WORKFLOW}/${GITHUB_RUN_NUMBER}/${{ matrix.platform }}_${{ matrix.config }}/artifacts.tar \
+ --label github \
+ --label ${GITHUB_EVENT_NAME} \
+ --label ${WORKFLOW} \
+ --label actor-${GITHUB_ACTOR} \
+ --label actor_id-${GITHUB_ACTOR_ID} \
+ --label triggering_actor-${GITHUB_TRIGGERING_ACTOR} \
+ --label sha-${GITHUB_SHA} \
+ --label repository-${GITHUB_REPO} \
+ --label author-${GITHUB_PR_HEAD_USER_LOGIN:-$GITHUB_COMMIT_AUTHOR_USERNAME} \
+ --label author_id-${GITHUB_PR_HEAD_USER_ID:-$GITHUB_COMMIT_AUTHOR_EMAIL}
+ )
+ echo "SESSION_ID=$SESSION_ID" >> $GITHUB_ENV
+ shell: bash
+ - name: watch ${{ env.SHARD_NAME }} tests on ${{ matrix.platform }} platform
+ env:
+ GITHUB_TOKEN: ${{ github.token }}
+ GITHUB_SHA: ${{ github.sha }}
+ run: |
+ set -uxe
+ python3 tools/on_device_tests_gateway_client.py \
+ --token "${GITHUB_TOKEN}" \
+ --change_id "${GITHUB_SHA}" \
+ watch ${{ env.SESSION_ID }}
+ shell: bash
diff --git a/.github/actions/on_host_test/action.yaml b/.github/actions/on_host_test/action.yaml
new file mode 100644
index 0000000..a10a8bf
--- /dev/null
+++ b/.github/actions/on_host_test/action.yaml
@@ -0,0 +1,99 @@
+name: On Host Test
+description: Runs on-host tests.
+inputs:
+ os:
+ description: "Host OS (either linux or windows)."
+ required: true
+runs:
+ using: "composite"
+ steps:
+ - name: Set up Cloud SDK
+ uses: isarkis/setup-gcloud@40dce7857b354839efac498d3632050f568090b6 # v1.1.1
+ - name: Configure Environment
+ id: configure-environment
+ shell: bash
+ run: |
+ set -x
+ if [ "${{inputs.os}}" == 'linux' ]
+ then
+ echo "ARCHIVE_EXTENSION=tar.xz" >> $GITHUB_ENV
+ elif [ "${{inputs.os}}" == 'windows' ]
+ then
+ echo "ARCHIVE_EXTENSION=tar.gz" >> $GITHUB_ENV
+ fi
+ - name: Download Archive
+ shell: bash
+ env:
+ WORKFLOW: ${{ github.workflow }}
+ run: |
+ set -x
+ PROJECT_NAME=$(gcloud config get-value project)
+ gsutil cp gs://${PROJECT_NAME}-test-artifacts/${WORKFLOW}/${GITHUB_RUN_NUMBER}/${{matrix.platform}}_${{matrix.config}}/${{matrix.platform}}_${{matrix.config}}.${ARCHIVE_EXTENSION} ${GITHUB_WORKSPACE}/out/tmp/${{matrix.platform}}_${{matrix.config}}.${ARCHIVE_EXTENSION}
+ - name: Extract Archive
+ shell: bash
+ run: |
+ set -x
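+ # Parallel extraction is only used for the Linux (tar.xz) archives.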
+ parallel=
+ if [[ "${{inputs.os}}" == 'linux' ]]; then
+ parallel="--parallel"
+ fi
+ python3 ${GITHUB_WORKSPACE}/tools/create_archive.py -x -s ${GITHUB_WORKSPACE}/out/tmp/${{matrix.platform}}_${{matrix.config}}.${ARCHIVE_EXTENSION} -d ${GITHUB_WORKSPACE}/out ${parallel}
+ rm -rf ${GITHUB_WORKSPACE}/out/tmp
+ - name: Download Bootloader Archive
+ if: ${{ env.COBALT_BOOTLOADER != null && env.COBALT_BOOTLOADER != 'null' }}
+ shell: bash
+ env:
+ WORKFLOW: ${{ github.workflow }}
+ run: |
+ set -x
+ PROJECT_NAME=$(gcloud config get-value project)
+ gsutil cp gs://${PROJECT_NAME}-test-artifacts/${WORKFLOW}/${GITHUB_RUN_NUMBER}/${{matrix.platform}}_${{matrix.config}}/${COBALT_BOOTLOADER}_${{matrix.config}}.${ARCHIVE_EXTENSION} ${GITHUB_WORKSPACE}/out/tmp/${COBALT_BOOTLOADER}_${{matrix.config}}.${ARCHIVE_EXTENSION}
+ - name: Extract Bootloader Archive
+ if: ${{ env.COBALT_BOOTLOADER != null && env.COBALT_BOOTLOADER != 'null' }}
+ shell: bash
+ run: |
+ set -x
+ python3 ${GITHUB_WORKSPACE}/tools/create_archive.py -x -s ${GITHUB_WORKSPACE}/out/tmp/${COBALT_BOOTLOADER}_${{matrix.config}}.${ARCHIVE_EXTENSION} -d ${GITHUB_WORKSPACE}/out --parallel
+ rm -rf ${GITHUB_WORKSPACE}/out/tmp
+ - name: Set Env Variables
+ shell: bash
+ run: |
+ echo "PYTHONPATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+ echo "TEST_RESULTS_DIR=${GITHUB_WORKSPACE}/unit-test-results" >> $GITHUB_ENV
+ echo "TEST_REPORT_FILE=${GITHUB_WORKSPACE}/${{matrix.platform}}-${{matrix.shard}}" >> $GITHUB_ENV
+ - name: Run Tests
+ shell: bash
+ run: |
+ set -x
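+ # When testing through a bootloader (Evergreen), forward the loader platform and config to the test runners.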
+ loader_args=''
+ if [ "${COBALT_BOOTLOADER}" != "null" ]; then
+ loader_args="--loader_platform ${COBALT_BOOTLOADER} --loader_config ${{matrix.config}}"
+ fi
+ if [[ "${{matrix.shard}}" == 'integration' ]]; then
+ xvfb-run -a --server-args="-screen 0 1920x1080x24i +render +extension GLX -noreset" python3 $GITHUB_WORKSPACE/cobalt/black_box_tests/black_box_tests.py --platform ${{matrix.target_platform}} --config ${{matrix.config}} ${loader_args}
+ elif [[ "${{matrix.shard}}" == 'blackbox' ]]; then
+ xvfb-run -a --server-args="-screen 0 1920x1080x24i +render +extension GLX -noreset" python3 $GITHUB_WORKSPACE/cobalt/black_box_tests/black_box_tests.py --platform ${{matrix.target_platform}} --config ${{matrix.config}} ${loader_args} --test_set blackbox
+ elif [[ "${{matrix.shard}}" == 'wpt' ]]; then
+ xvfb-run -a --server-args="-screen 0 1920x1080x24i +render +extension GLX -noreset" python3 $GITHUB_WORKSPACE/cobalt/black_box_tests/black_box_tests.py --platform ${{matrix.target_platform}} --config ${{matrix.config}} ${loader_args} --test_set wpt
+ elif [[ "${{matrix.shard}}" == 'evergreen' ]]; then
+ xvfb-run -a --server-args="-screen 0 1920x1080x24i +render +extension GLX -noreset" python3 $GITHUB_WORKSPACE/cobalt/evergreen_tests/evergreen_tests.py --platform ${{matrix.target_platform}} --config ${{matrix.config}} ${loader_args} --no-can_mount_tmpfs
+ else
+ if [[ "${{inputs.os}}" == 'windows' ]]; then
+ python3 ${GITHUB_WORKSPACE}/starboard/tools/testing/test_runner.py --platform ${{matrix.target_platform}} --config ${{matrix.config}} -s ${{matrix.shard}} -r
+ else
+ xvfb-run -a --server-args="-screen 0 1920x1080x24i +render +extension GLX -noreset" python3 ${GITHUB_WORKSPACE}/starboard/tools/testing/test_runner.py --platform ${{matrix.target_platform}} --config ${{matrix.config}} -s ${{matrix.shard}} -r ${loader_args} --xml_output_dir=${TEST_RESULTS_DIR}
+ fi
+ fi
+ - name: Process unit test results
+ if: failure()
+ shell: bash
+ run: |
+ set -x
+ echo "Saving unit test report to ${TEST_REPORT_FILE}"
+ python3 ${GITHUB_WORKSPACE}/starboard/tools/testing/test_report_parser.py ${TEST_RESULTS_DIR} > ${TEST_REPORT_FILE}
+ - name: Upload unit test report
+ uses: actions/upload-artifact@v3
+ if: failure()
+ with:
+ name: unit-test-reports
+ path: ${{env.TEST_REPORT_FILE}}
diff --git a/.github/actions/pre_commit/action.yaml b/.github/actions/pre_commit/action.yaml
new file mode 100644
index 0000000..d8bf9a8
--- /dev/null
+++ b/.github/actions/pre_commit/action.yaml
@@ -0,0 +1,25 @@
+name: pre-commit
+description: Runs pre-commit
+inputs:
+ base_ref:
+ description: "Ref to run from"
+ required: true
+runs:
+ using: "composite"
+ steps:
+ - run: python -m pip install pre-commit
+ shell: bash
+ - run: python -m pip freeze --local
+ shell: bash
+ - uses: actions/cache@v3
+ with:
+ path: ~/.cache/pre-commit
+ key: pre-commit-3|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
+ - run: pre-commit run --show-diff-on-failure --color=always --from-ref ${{ inputs.base_ref }} --to-ref HEAD
+ shell: bash
+ env:
+ SKIP: 'run-py2-tests'
+ - run: pre-commit run --show-diff-on-failure --color=always --hook-stage push --from-ref ${{ inputs.base_ref }} --to-ref HEAD
+ shell: bash
+ env:
+ SKIP: 'test-download-from-gcs-helper,check-bug-in-commit-message,check-if-starboard-interface-changed'
diff --git a/.github/actions/upload_nightly_artifacts/action.yaml b/.github/actions/upload_nightly_artifacts/action.yaml
new file mode 100644
index 0000000..345e06b
--- /dev/null
+++ b/.github/actions/upload_nightly_artifacts/action.yaml
@@ -0,0 +1,39 @@
+name: Upload Nightly Artifacts
+description: Archives and uploads nightly artifacts to the GCS bucket.
+runs:
+ using: "composite"
+ steps:
+ - name: Set up Cloud SDK
+ uses: isarkis/setup-gcloud@40dce7857b354839efac498d3632050f568090b6 # v1.1.1
+ - name: Set env vars
+ env:
+ WORKFLOW: ${{github.workflow}}
+ run: |
+ echo "ARCHIVE_FILE=cobalt-${{matrix.platform}}_${{matrix.config}}.tar.gz" >> $GITHUB_ENV
+ echo "ARCHIVE_PATH=$GITHUB_WORKSPACE/cobalt-${{matrix.platform}}_${{matrix.config}}.tar.gz" >> $GITHUB_ENV
+ echo "PROJECT_NAME=$(gcloud config get-value project)" >> $GITHUB_ENV
+ echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV
+ echo "GITHUB_RUN_NUMBER=${GITHUB_RUN_NUMBER}" >> $GITHUB_ENV
+ echo "WORKFLOW=${WORKFLOW}" >> $GITHUB_ENV
+ echo "PYTHONPATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+ shell: bash
+ - name: Copy Out Folder
+ run: |
+ # Clean up.
+ [ -d "${GITHUB_WORKSPACE}/out/upload_out" ] && rm -rf "${GITHUB_WORKSPACE}/out/upload_out"
+ [ -f "${ARCHIVE_FILE}" ] && rm -rf "${ARCHIVE_FILE}"
+ # Copy and filter the out directory before archiving.
+ python3 $GITHUB_WORKSPACE/tools/copy_and_filter_out_dir.py -d $GITHUB_WORKSPACE/out/upload_out/${{matrix.target_platform}}_${{matrix.config}} -s $GITHUB_WORKSPACE/out/${{matrix.target_platform}}_${{matrix.config}}
+ shell: bash
+ - name: Create Archive
+ run: |
+ set -x
+ cd "$GITHUB_WORKSPACE"
+ python3 $GITHUB_WORKSPACE/tools/create_archive.py --intermediate -d ${{env.ARCHIVE_FILE}} -s out/upload_out
+ shell: bash
+ - name: Upload Archive
+ id: upload-archive
+ shell: bash
+ run: |
+ set -uex
+ gsutil -d cp "${ARCHIVE_PATH}" "gs://${PROJECT_NAME}-build-artifacts/${WORKFLOW}/${TODAY}/${GITHUB_RUN_NUMBER}/"
diff --git a/.github/actions/upload_test_artifacts/action.yaml b/.github/actions/upload_test_artifacts/action.yaml
new file mode 100644
index 0000000..d2923eb
--- /dev/null
+++ b/.github/actions/upload_test_artifacts/action.yaml
@@ -0,0 +1,78 @@
+name: Test Artifact Upload
+description: Creates and uploads test artifact archives to GCS for on-host and on-device test runs.
+inputs:
+ type:
+ description: "Type of artifacts to upload (ondevice or onhost)"
+ required: true
+ os:
+ description: "Host OS (either linux or windows)."
+ required: true
+runs:
+ using: "composite"
+ steps:
+ - name: Set up Cloud SDK
+ uses: isarkis/setup-gcloud@40dce7857b354839efac498d3632050f568090b6 # v1.1.1
+ - name: Configure Environment
+ env:
+ WORKFLOW: ${{ github.workflow }}
+ run: |
+ set -x
+ project_name=$(gcloud config get-value project)
+ if [ -z ${COBALT_BOOTLOADER+x} ]
+ then
+ PLATFORM=${{matrix.platform}}
+ echo "TARGET_PLATFORM=${{matrix.target_platform}}" >> $GITHUB_ENV
+ else
+ PLATFORM=${COBALT_BOOTLOADER}
+ echo "TARGET_PLATFORM=${COBALT_BOOTLOADER}" >> $GITHUB_ENV
+ fi
+
+ if [ "${{ inputs.type }}" == 'ondevice' ]
+ then
+ echo "ARCHIVE_FILE=artifacts.tar" >> $GITHUB_ENV
+ echo "ARCHIVE_PATH=$GITHUB_WORKSPACE/artifacts.tar" >> $GITHUB_ENV
+ echo "DESTINATION=${project_name}-test-artifacts/${WORKFLOW}/${GITHUB_RUN_NUMBER}/${{matrix.platform}}_${{matrix.config}}/" >> $GITHUB_ENV
+ elif [ "${{ inputs.type }}" == 'onhost' ]
+ then
+ if [ "${{ inputs.os }}" == 'linux' ]
+ then
+ echo "ARCHIVE_FILE=${PLATFORM}_${{matrix.config}}.tar.xz" >> $GITHUB_ENV
+ echo "ARCHIVE_PATH=$GITHUB_WORKSPACE/${PLATFORM}_${{matrix.config}}.tar.xz" >> $GITHUB_ENV
+ elif [ "${{ inputs.os }}" == 'windows' ]
+ then
+ echo "ARCHIVE_FILE=${PLATFORM}_${{matrix.config}}.tar.gz" >> $GITHUB_ENV
+ echo "ARCHIVE_PATH=$GITHUB_WORKSPACE/${PLATFORM}_${{matrix.config}}.tar.gz" >> $GITHUB_ENV
+ fi
+ echo "DESTINATION=${project_name}-test-artifacts/${WORKFLOW}/${GITHUB_RUN_NUMBER}/${{matrix.platform}}_${{matrix.config}}/" >> $GITHUB_ENV
+ fi
+
+ echo "PYTHONPATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+ project_name=$(gcloud config get-value project)
+ shell: bash
+ - name: Create Test Files Archive
+ run: |
+ set -x
+ [ -f "${ARCHIVE_PATH}" ] && rm -rf "${ARCHIVE_PATH}"
+ if [ "${{ inputs.type }}" == 'ondevice' ]
+ then
+ outdir="$GITHUB_WORKSPACE/out/${{matrix.target_platform}}_${{matrix.config}}"
+ if [ -n "${COBALT_BOOTLOADER}" ]
+ then
+ outdir="${outdir} $GITHUB_WORKSPACE/out/${COBALT_BOOTLOADER}_${{matrix.config}}"
+ fi
+ python3 $GITHUB_WORKSPACE/tools/create_archive.py --test_infra -d ${{env.ARCHIVE_FILE}} -s ${outdir}
+ elif [ "${{ inputs.type }}" == 'onhost' ]
+ then
+ parallel=
+ if [[ "${{inputs.os}}" == 'linux' ]]; then
+ parallel='--parallel'
+ fi
+ python3 $GITHUB_WORKSPACE/tools/create_archive.py --intermediate -d ${{env.ARCHIVE_FILE}} -s $GITHUB_WORKSPACE/out/${TARGET_PLATFORM}_${{matrix.config}} $parallel
+ fi
+ shell: bash
+ - name: Copy Test Files to GCS
+ id: upload-test-archive
+ shell: bash
+ run: |
+ set -eux
+ gsutil -d cp "${ARCHIVE_PATH}" "gs://${DESTINATION}"
diff --git a/.github/config/android-arm.json b/.github/config/android-arm.json
new file mode 100644
index 0000000..50e30ce
--- /dev/null
+++ b/.github/config/android-arm.json
@@ -0,0 +1,26 @@
+{
+ "docker_service": "build-android",
+ "on_device_test": {
+ "enabled": true,
+ "tests": [
+ "0",
+ "1",
+ "2",
+ "3",
+ "black_box_test"
+ ],
+ "test_attempts": 2
+ },
+ "platforms": [
+ "android-arm"
+ ],
+ "includes": [
+ {
+ "name":"arm",
+ "platform":"android-arm",
+ "target_platform":"android-arm",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "target_os":"target_os=\\\"android\\\""
+ }
+ ]
+}
diff --git a/.github/config/android-arm64.json b/.github/config/android-arm64.json
new file mode 100644
index 0000000..fe96371
--- /dev/null
+++ b/.github/config/android-arm64.json
@@ -0,0 +1,26 @@
+{
+ "docker_service": "build-android",
+ "on_device_test": {
+ "enabled": true,
+ "tests": [
+ "0",
+ "1",
+ "2",
+ "3",
+ "black_box_test"
+ ],
+ "test_attempts": 2
+ },
+ "platforms": [
+ "android-arm64"
+ ],
+ "includes": [
+ {
+ "name":"arm64",
+ "platform":"android-arm64",
+ "target_platform":"android-arm64",
+ "target_cpu":"target_cpu=\\\"arm64\\\"",
+ "target_os": "target_os=\\\"android\\\""
+ }
+ ]
+}
diff --git a/.github/config/android-x86.json b/.github/config/android-x86.json
new file mode 100644
index 0000000..7e371e9
--- /dev/null
+++ b/.github/config/android-x86.json
@@ -0,0 +1,26 @@
+{
+ "docker_service": "build-android",
+ "on_device_test": {
+ "enabled": true,
+ "tests": [
+ "0",
+ "1",
+ "2",
+ "3",
+ "black_box_test"
+ ],
+ "test_attempts": 2
+ },
+ "platforms": [
+ "android-x86"
+ ],
+ "includes": [
+ {
+ "name":"x86",
+ "platform":"android-x86",
+ "target_platform":"android-x86",
+ "target_cpu":"target_cpu=\\\"x86\\\"",
+ "target_os": "target_os=\\\"android\\\""
+ }
+ ]
+}
diff --git a/.github/config/evergreen-arm-hardfp.json b/.github/config/evergreen-arm-hardfp.json
new file mode 100644
index 0000000..8067c72
--- /dev/null
+++ b/.github/config/evergreen-arm-hardfp.json
@@ -0,0 +1,62 @@
+{
+ "docker_service": "build-raspi",
+ "bootloader": "raspi-2",
+ "on_device_test": {
+ "enabled": true,
+ "tests": [
+ "evergreen_test",
+ "0",
+ "1",
+ "2",
+ "3"
+ ],
+ "test_attempts": 2
+ },
+ "platforms": [
+ "evergreen-arm-hardfp",
+ "evergreen-arm-hardfp-sbversion-15",
+ "evergreen-arm-hardfp-sbversion-14",
+ "evergreen-arm-hardfp-sbversion-13"
+ ],
+ "includes": [
+ {
+ "name":"hardfp",
+ "platform":"evergreen-arm-hardfp",
+ "target_platform":"evergreen-arm-hardfp",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "bootloader_extra_gn_arguments": "use_asan=false is_clang=false",
+ "dimension": "release_version=regex:10.*"
+ },
+ {
+ "name":"sbversion-15",
+ "platform":"evergreen-arm-hardfp-sbversion-15",
+ "target_platform":"evergreen-arm-hardfp",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "bootloader_extra_gn_arguments":"use_asan=false is_clang=false",
+ "sb_api_version": "sb_api_version=15",
+ "dimension": "release_version=regex:10.*"
+ },
+ {
+ "name":"sbversion-14",
+ "platform":"evergreen-arm-hardfp-sbversion-14",
+ "target_platform":"evergreen-arm-hardfp",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "bootloader_extra_gn_arguments":"use_asan=false is_clang=false",
+ "sb_api_version": "sb_api_version=14",
+ "dimension": "release_version=regex:10.*"
+ },
+ {
+ "name":"sbversion-13",
+ "platform":"evergreen-arm-hardfp-sbversion-13",
+ "target_platform":"evergreen-arm-hardfp",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "bootloader_extra_gn_arguments":"use_asan=false is_clang=false",
+ "sb_api_version": "sb_api_version=13",
+ "dimension": "release_version=regex:10.*"
+ }
+ ]
+}
diff --git a/.github/config/evergreen-arm-softfp.json b/.github/config/evergreen-arm-softfp.json
new file mode 100644
index 0000000..a90304c
--- /dev/null
+++ b/.github/config/evergreen-arm-softfp.json
@@ -0,0 +1,42 @@
+{
+ "docker_service": "build-evergreen",
+ "platforms": [
+ "evergreen-arm-softfp",
+ "evergreen-arm-softfp-sbversion-15",
+ "evergreen-arm-softfp-sbversion-14",
+ "evergreen-arm-softfp-sbversion-13"
+ ],
+ "includes": [
+ {
+ "name":"softfp",
+ "platform":"evergreen-arm-softfp",
+ "target_platform":"evergreen-arm-softfp",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"use_asan=false"
+ },
+ {
+ "name":"sbversion-15",
+ "platform":"evergreen-arm-softfp-sbversion-15",
+ "target_platform":"evergreen-arm-softfp",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=15"
+ },
+ {
+ "name":"sbversion-14",
+ "platform":"evergreen-arm-softfp-sbversion-14",
+ "target_platform":"evergreen-arm-softfp",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=14"
+ },
+ {
+ "name":"sbversion-13",
+ "platform":"evergreen-arm-softfp-sbversion-13",
+ "target_platform":"evergreen-arm-softfp",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=13"
+ }
+ ]
+}
diff --git a/.github/config/evergreen-arm64.json b/.github/config/evergreen-arm64.json
new file mode 100644
index 0000000..607c332
--- /dev/null
+++ b/.github/config/evergreen-arm64.json
@@ -0,0 +1,42 @@
+{
+ "docker_service": "build-evergreen",
+ "platforms": [
+ "evergreen-arm64",
+ "evergreen-arm64-sbversion-15",
+ "evergreen-arm64-sbversion-14",
+ "evergreen-arm64-sbversion-13"
+ ],
+ "includes": [
+ {
+ "name":"arm64",
+ "platform":"evergreen-arm64",
+ "target_platform":"evergreen-arm64",
+ "target_cpu":"target_cpu=\\\"arm64\\\"",
+ "extra_gn_arguments":"use_asan=false"
+ },
+ {
+ "name":"sbversion-15",
+ "platform":"evergreen-arm64-sbversion-15",
+ "target_platform":"evergreen-arm64",
+ "target_cpu":"target_cpu=\\\"arm64\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=15"
+ },
+ {
+ "name":"sbversion-14",
+ "platform":"evergreen-arm64-sbversion-14",
+ "target_platform":"evergreen-arm64",
+ "target_cpu":"target_cpu=\\\"arm64\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=14"
+ },
+ {
+ "name":"sbversion-13",
+ "platform":"evergreen-arm64-sbversion-13",
+ "target_platform":"evergreen-arm64",
+ "target_cpu":"target_cpu=\\\"arm64\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=13"
+ }
+ ]
+}
diff --git a/.github/config/evergreen-x64.json b/.github/config/evergreen-x64.json
new file mode 100644
index 0000000..f3171ec
--- /dev/null
+++ b/.github/config/evergreen-x64.json
@@ -0,0 +1,45 @@
+{
+ "docker_service": "build-linux-evergreen",
+ "on_host_test": true,
+ "bootloader": "linux-x64x11",
+ "on_host_test_shards": ["0", "1", "2", "3", "blackbox", "wpt", "evergreen"],
+ "platforms": [
+ "evergreen-x64",
+ "evergreen-x64-sbversion-15",
+ "evergreen-x64-sbversion-14",
+ "evergreen-x64-sbversion-13"
+ ],
+ "includes": [
+ {
+ "name":"x64",
+ "platform":"evergreen-x64",
+ "target_platform":"evergreen-x64",
+ "target_cpu":"target_cpu=\\\"x64\\\"",
+ "extra_gn_arguments":"use_asan=false"
+ },
+ {
+ "name":"sbversion-15",
+ "platform":"evergreen-x64-sbversion-15",
+ "target_platform":"evergreen-x64",
+ "target_cpu":"target_cpu=\\\"x64\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=15"
+ },
+ {
+ "name":"sbversion-14",
+ "platform":"evergreen-x64-sbversion-14",
+ "target_platform":"evergreen-x64",
+ "target_cpu":"target_cpu=\\\"x64\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=14"
+ },
+ {
+ "name":"sbversion-13",
+ "platform":"evergreen-x64-sbversion-13",
+ "target_platform":"evergreen-x64",
+ "target_cpu":"target_cpu=\\\"x64\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=13"
+ }
+ ]
+}
diff --git a/.github/config/evergreen-x86.json b/.github/config/evergreen-x86.json
new file mode 100644
index 0000000..be80dde
--- /dev/null
+++ b/.github/config/evergreen-x86.json
@@ -0,0 +1,42 @@
+{
+ "docker_service": "build-evergreen",
+ "platforms": [
+ "evergreen-x86",
+ "evergreen-x86-sbversion-15",
+ "evergreen-x86-sbversion-14",
+ "evergreen-x86-sbversion-13"
+ ],
+ "includes": [
+ {
+ "name":"x86",
+ "platform":"evergreen-x86",
+ "target_platform":"evergreen-x86",
+ "target_cpu":"target_cpu=\\\"x86\\\"",
+ "extra_gn_arguments":"use_asan=false"
+ },
+ {
+ "name":"sbversion-15",
+ "platform":"evergreen-x86-sbversion-15",
+ "target_platform":"evergreen-x86",
+ "target_cpu":"target_cpu=\\\"x86\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=15"
+ },
+ {
+ "name":"sbversion-14",
+ "platform":"evergreen-x86-sbversion-14",
+ "target_platform":"evergreen-x86",
+ "target_cpu":"target_cpu=\\\"x86\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=14"
+ },
+ {
+ "name":"sbversion-13",
+ "platform":"evergreen-x86-sbversion-13",
+ "target_platform":"evergreen-x86",
+ "target_cpu":"target_cpu=\\\"x86\\\"",
+ "extra_gn_arguments":"use_asan=false",
+ "sb_api_version":"sb_api_version=13"
+ }
+ ]
+}
diff --git a/.github/config/linux-clang-3-9.json b/.github/config/linux-clang-3-9.json
new file mode 100644
index 0000000..59bcc19
--- /dev/null
+++ b/.github/config/linux-clang-3-9.json
@@ -0,0 +1,16 @@
+{
+ "docker_service": "build-linux-clang-3-9",
+ "on_host_test": true,
+ "on_host_test_shards": ["0", "1", "2", "3", "blackbox", "wpt"],
+ "platforms": [
+ "linux-x64x11-clang-3-9"
+ ],
+ "includes": [
+ {
+ "name":"clang-3-9",
+ "platform":"linux-x64x11-clang-3-9",
+ "target_platform":"linux-x64x11-clang-3-9",
+ "extra_gn_arguments":"using_old_compiler=true"
+ }
+ ]
+}
diff --git a/.github/config/linux-gcc-6-3.json b/.github/config/linux-gcc-6-3.json
new file mode 100644
index 0000000..9b65750
--- /dev/null
+++ b/.github/config/linux-gcc-6-3.json
@@ -0,0 +1,14 @@
+{
+ "docker_service": "build-linux-gcc",
+ "platforms": [
+ "linux-x64x11-gcc-6-3"
+ ],
+ "includes": [
+ {
+ "name":"gcc-6-3",
+ "platform":"linux-x64x11-gcc-6-3",
+ "target_platform":"linux-x64x11-gcc-6-3",
+ "extra_gn_arguments":"is_clang=false using_old_compiler=true"
+ }
+ ]
+}
diff --git a/.github/config/linux.json b/.github/config/linux.json
new file mode 100644
index 0000000..79ceb0b
--- /dev/null
+++ b/.github/config/linux.json
@@ -0,0 +1,48 @@
+{
+ "docker_service": "build-linux",
+ "on_host_test": true,
+ "on_host_test_shards": ["0", "1", "2", "3", "blackbox", "wpt"],
+ "platforms": [
+ "linux-x64x11",
+ "linux-x64x11-egl",
+ "linux-x64x11-skia",
+ "linux-x64x11-sbversion-13",
+ "linux-x64x11-sbversion-14",
+ "linux-x64x11-sbversion-15"
+ ],
+ "includes": [
+ {
+ "name":"x64",
+ "platform":"linux-x64x11",
+ "target_platform":"linux-x64x11"
+ },
+ {
+ "name":"egl",
+ "platform":"linux-x64x11-egl",
+ "target_platform":"linux-x64x11-egl"
+ },
+ {
+ "name":"skia",
+ "platform":"linux-x64x11-skia",
+ "target_platform":"linux-x64x11-skia"
+ },
+ {
+ "name":"sbversion-13",
+ "platform":"linux-x64x11-sbversion-13",
+ "target_platform":"linux-x64x11",
+ "sb_api_version":"sb_api_version=13"
+ },
+ {
+ "name":"sbversion-14",
+ "platform":"linux-x64x11-sbversion-14",
+ "target_platform":"linux-x64x11",
+ "sb_api_version":"sb_api_version=14"
+ },
+ {
+ "name":"sbversion-15",
+ "platform":"linux-x64x11-sbversion-15",
+ "target_platform":"linux-x64x11",
+ "sb_api_version":"sb_api_version=15"
+ }
+ ]
+}
diff --git a/.github/config/raspi-2-skia.json b/.github/config/raspi-2-skia.json
new file mode 100644
index 0000000..f99fb73
--- /dev/null
+++ b/.github/config/raspi-2-skia.json
@@ -0,0 +1,15 @@
+{
+ "docker_service": "build-raspi",
+ "platforms": [
+ "raspi-2-skia"
+ ],
+ "includes": [
+ {
+ "name":"raspi-2-skia",
+ "platform":"raspi-2-skia",
+ "target_platform":"raspi-2-skia",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments": "is_clang=false"
+ }
+ ]
+}
diff --git a/.github/config/raspi-2.json b/.github/config/raspi-2.json
new file mode 100644
index 0000000..ab2a1e6
--- /dev/null
+++ b/.github/config/raspi-2.json
@@ -0,0 +1,58 @@
+{
+ "docker_service": "build-raspi",
+ "on_device_test": {
+ "enabled": true,
+ "tests": [
+ "0",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "test_attempts": 2
+ },
+ "platforms": [
+ "raspi-2",
+ "raspi-2-sbversion-13",
+ "raspi-2-sbversion-14",
+ "raspi-2-sbversion-15"
+ ],
+ "includes": [
+ {
+ "name":"raspi",
+ "platform":"raspi-2",
+ "target_platform":"raspi-2",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments": "is_clang=false",
+ "dimension": "release_version=regex:10.*"
+ },
+ {
+ "name":"sbversion-13",
+ "platform":"raspi-2-sbversion-13",
+ "target_platform":"raspi-2",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"is_clang=false",
+ "sb_api_version": "sb_api_version=13",
+ "dimension": "release_version=regex:10.*"
+ },
+ {
+ "name":"sbversion-14",
+ "platform":"raspi-2-sbversion-14",
+ "target_platform":"raspi-2",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"is_clang=false",
+ "sb_api_version": "sb_api_version=14",
+ "dimension": "release_version=regex:10.*"
+ },
+ {
+ "name":"sbversion-15",
+ "platform":"raspi-2-sbversion-15",
+ "target_platform":"raspi-2",
+ "target_cpu":"target_cpu=\\\"arm\\\"",
+ "extra_gn_arguments":"is_clang=false",
+ "sb_api_version": "sb_api_version=15",
+ "dimension": "release_version=regex:10.*"
+ }
+ ]
+}
diff --git a/.github/config/stub.json b/.github/config/stub.json
new file mode 100644
index 0000000..792466f
--- /dev/null
+++ b/.github/config/stub.json
@@ -0,0 +1,13 @@
+{
+ "docker_service": "build-linux-stub",
+ "platforms": [
+ "stub"
+ ],
+ "includes": [
+ {
+ "name":"stub",
+ "platform":"stub",
+ "target_platform":"stub"
+ }
+ ]
+}
diff --git a/.github/config/win32.json b/.github/config/win32.json
new file mode 100644
index 0000000..c65092c
--- /dev/null
+++ b/.github/config/win32.json
@@ -0,0 +1,17 @@
+{
+ "docker_service": "build-win-win32",
+ "docker_runner_service": "runner-win-win32",
+ "platforms": [
+ "win32"
+ ],
+ "on_host_test": true,
+ "on_host_test_shards": ["0", "1", "2", "3"],
+ "includes": [
+ {
+ "name":"win32",
+ "platform":"win32",
+ "target_platform":"win-win32",
+ "extra_gn_arguments":"is_clang=false visual_studio_path=\\\"C:/BuildTools/VC/Tools/MSVC/14.15.26726\\\" msvc_path=\\\"C:/BuildTools/VC/Tools/MSVC/14.15.26726\\\""
+ }
+ ]
+}
diff --git a/.github/workflows/android.yaml b/.github/workflows/android.yaml
new file mode 100644
index 0000000..f94dc7e
--- /dev/null
+++ b/.github/workflows/android.yaml
@@ -0,0 +1,48 @@
+name: android
+
+on:
+ pull_request:
+ types: [ready_for_review, opened, reopened, synchronize, labeled]
+ branches:
+ - main
+ - feature/*
+ push:
+ branches:
+ - main
+ - feature/*
+ schedule:
+ # GMT timezone.
+ - cron: '0 4 * * *'
+ workflow_dispatch:
+ inputs:
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: boolean
+ default: false
+
+jobs:
+ android-arm64:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: android-arm64
+ nightly: ${{ github.event.inputs.nightly }}
+ android-x86:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: android-x86
+ nightly: ${{ github.event.inputs.nightly }}
+ android-arm:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: android-arm
+ nightly: ${{ github.event.inputs.nightly }}
diff --git a/.github/workflows/evergreen.yaml b/.github/workflows/evergreen.yaml
new file mode 100644
index 0000000..ef12fdf
--- /dev/null
+++ b/.github/workflows/evergreen.yaml
@@ -0,0 +1,69 @@
+name: evergreen
+
+on:
+ pull_request:
+ types: [ready_for_review, opened, reopened, synchronize, labeled]
+ branches:
+ - main
+ - feature/*
+ push:
+ branches:
+ - main
+ - feature/*
+ schedule:
+ # GMT timezone.
+ - cron: '0 5 * * *'
+ workflow_dispatch:
+ inputs:
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: boolean
+ default: false
+
+jobs:
+ evergreen-x64:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: evergreen-x64
+ nightly: ${{ github.event.inputs.nightly }}
+ run_api_leak_detector: true
+ evergreen-arm-hardfp:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: evergreen-arm-hardfp
+ nightly: ${{ github.event.inputs.nightly }}
+ run_api_leak_detector: true
+ evergreen-arm-softfp:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: evergreen-arm-softfp
+ nightly: ${{ github.event.inputs.nightly }}
+ run_api_leak_detector: true
+ evergreen-arm64:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: evergreen-arm64
+ nightly: ${{ github.event.inputs.nightly }}
+ run_api_leak_detector: true
+ evergreen-x86:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: evergreen-x86
+ nightly: ${{ github.event.inputs.nightly }}
+ run_api_leak_detector: true
diff --git a/.github/workflows/gradle.yaml b/.github/workflows/gradle.yaml
new file mode 100644
index 0000000..88ce9fd
--- /dev/null
+++ b/.github/workflows/gradle.yaml
@@ -0,0 +1,33 @@
+name: Java Tests
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+ - feature/*
+
+concurrency:
+ group: '${{ github.workflow }}-${{ github.event_name }}-${{ inputs.platform }} @ ${{ github.event.pull_request.number || github.sha }}'
+ cancel-in-progress: true
+
+permissions: {}
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: kaidokert/checkout@v3.5.999
+ - name: Set up JDK 11
+ uses: actions/setup-java@v3
+ with:
+ distribution: 'zulu'
+ java-version: 11
+ - name: Validate Gradle wrapper
+ uses: gradle/wrapper-validation-action@ccb4328a959376b642e027874838f60f8e596de3 #v1.0.6
+ - name: Build with Gradle
+ uses: gradle/gradle-build-action@749f47bda3e44aa060e82d7b3ef7e40d953bd629 #v2.4.2
+ with:
+ arguments: test
+ build-root-directory: starboard/android/apk
diff --git a/.github/workflows/label-cherry-pick.yaml b/.github/workflows/label-cherry-pick.yaml
new file mode 100644
index 0000000..6ac390a
--- /dev/null
+++ b/.github/workflows/label-cherry-pick.yaml
@@ -0,0 +1,99 @@
+name: Label Cherry Pick
+
+on:
+ pull_request_target:
+ types:
+ - labeled
+ - closed
+
+jobs:
+ prepare_branch_list:
+ runs-on: ubuntu-latest
+ outputs:
+ target_branch: ${{ steps.set-branches.outputs.target_branch }}
+ steps:
+ - name: Set Branches
+ id: set-branches
+ env:
+ PR_LABELS: ${{ toJson(github.event.pull_request.labels) }}
+ EVENT_ACTION: ${{ github.event.action }}
+ LABEL_NAME: ${{ github.event.label.name }}
+ BASE_REF: ${{ github.base_ref }}
+ run: |
+ if [[ $EVENT_ACTION == 'closed' ]]; then
+ labels=$(echo "$PR_LABELS" | jq -r '.[].name')
+ else
+ labels=$LABEL_NAME
+ fi
+
+ branches=("24.lts.1+" "23.lts.1+" "22.lts.1+" "21.lts.1+" "20.lts.1+" "19.lts.1+" "rc_11" "COBALT_9")
+ filtered_branches=()
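+ # Keep only branches that have a matching "cp-<branch>" label and are not the PR's base branch.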
+ for branch in "${branches[@]}"; do
+ if [[ $branch == $BASE_REF ]]; then
+ continue
+ fi
+
+ for label in $labels; do
+ if [[ $label == "cp-$branch" ]]; then
+ filtered_branches+=("$branch")
+ fi
+ done
+ done
+
+ echo "target_branch=$(echo -n "$filtered_branches" | jq -cRs 'split("\n")')" >> $GITHUB_OUTPUT
+
+ cherry_pick:
+ runs-on: ubuntu-latest
+ needs: prepare_branch_list
+ if: |
+ needs.prepare_branch_list.outputs.target_branch != '[]' &&
+ github.event.pull_request.merged == true &&
+ github.event.pull_request.merge_commit_sha != null
+ strategy:
+ matrix:
+ target_branch: ${{ fromJson(needs.prepare_branch_list.outputs.target_branch) }}
+ env:
+ ACCESS_TOKEN: ${{ secrets.CHERRY_PICK_TOKEN }}
+ REPOSITORY: ${{ github.repository }}
+ GITHUB_REF: ${{ github.ref }}
+ MERGE_COMMIT_SHA: ${{ github.event.pull_request.merge_commit_sha }}
+ steps:
+ - name: Checkout repository
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ ref: ${{ matrix.target_branch }}
+ fetch-depth: 0
+ persist-credentials: false
+
+ - name: Setup Git
+ run: |
+ git config --global user.name "GitHub Release Automation"
+ git config --global user.email "github@google.com"
+
+ - name: Cherry pick merge commit
+ run: |
+ git fetch origin ${{ matrix.target_branch }}
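+ # If the cherry-pick conflicts, commit the conflicted result and open the PR as a draft for manual resolution.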
+ set +e
+ git cherry-pick -x $MERGE_COMMIT_SHA
+ RES=$?
+ set -e
+ if [ $RES -eq 0 ]; then
+ echo "CREATE_PR_AS_DRAFT=false" >> $GITHUB_ENV
+ else
+ echo "CREATE_PR_AS_DRAFT=true" >> $GITHUB_ENV
+ git add .
+ git cherry-pick --continue
+ fi
+
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@2b011faafdcbc9ceb11414d64d0573f37c774b04 # v4.2.3
+ with:
+ token: ${{ secrets.CHERRY_PICK_TOKEN }}
+ draft: ${{ env.CREATE_PR_AS_DRAFT }}
+ base: ${{ matrix.target_branch }}
+ branch: "${{ matrix.target_branch }}-${{ github.event.pull_request.number }}"
+ committer: GitHub Release Automation <github@google.com>
+ reviewers: ${{ github.event.pull_request.user.login }}
+ title: "Cherry pick PR #${{ github.event.pull_request.number }}: ${{ github.event.pull_request.title }}"
+ body: |
+ "Refer to the original PR: https://github.com/${{ github.repository }}/pull/${{ github.event.pull_request.number }}"
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
new file mode 100644
index 0000000..6a138cb
--- /dev/null
+++ b/.github/workflows/lint.yaml
@@ -0,0 +1,70 @@
+name: lint
+
+on:
+ pull_request:
+ types:
+ - opened
+ - edited
+ - reopened
+ - synchronize
+ push:
+ branches:
+ - main
+ - feature/*
+
+concurrency:
+ group: '${{ github.workflow }}-${{ github.event_name }}-${{ inputs.platform }} @ ${{ github.event.pull_request.number || github.sha }}'
+ cancel-in-progress: true
+
+permissions: {}
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Install clang-format Dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install libncurses5
+ - name: Download GN via CIPD
+ env:
+ GN_SHA256SUM: 'af7b2dcb3905bca56655e12131b365f1cba8e159db80d2022330c4f522fab2ef /tmp/gn.zip'
+ GN_HASH: r3styzkFvKVmVeEhMbNl8cuo4VnbgNICIzDE9SL6su8C
+ run: |
+ set -e -x
+ curl --location --silent --output /tmp/gn.zip "https://chrome-infra-packages.appspot.com/dl/gn/gn/linux-amd64/+/${GN_HASH}"
+ echo ${GN_SHA256SUM} | sha256sum --check
+ unzip /tmp/gn.zip -d /usr/local/bin
+ rm /tmp/gn.zip
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 0
+ persist-credentials: false
+ - name: Setup Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '^3.7.x'
+ - name: Install Pip Packages
+ run: pip install -r ${GITHUB_WORKSPACE}/requirements.txt
+ - name: Download Resources
+ run: python ${GITHUB_WORKSPACE}/download_resources.py
+ - name: pre-commit
+ uses: ./.github/actions/pre_commit
+ with:
+ base_ref: ${{ github.event.pull_request.base.sha && github.event.pull_request.base.sha || github.event.before }}
+ check-bug-id:
+ name: Check Bug ID
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check Bug ID Present
+ # v2
+ uses: gsactions/commit-message-checker@16fa2d5de096ae0d35626443bcd24f1e756cafee
+ with:
+ excludeTitle: true
+ excludeDescription: true
+ checkAllCommitMessages: true
+ accessToken: ${{ secrets.GITHUB_TOKEN }}
+ pattern: '^b\/\d+$'
+ flags: 'gm'
+ error: 'Commit message should include at least one bug ID on a separate line (e.g. b/12345).'
diff --git a/.github/workflows/linux.yaml b/.github/workflows/linux.yaml
new file mode 100644
index 0000000..cf18341
--- /dev/null
+++ b/.github/workflows/linux.yaml
@@ -0,0 +1,48 @@
+name: linux
+
+on:
+ pull_request:
+ types: [ready_for_review, opened, reopened, synchronize, labeled]
+ branches:
+ - main
+ - feature/*
+ push:
+ branches:
+ - main
+ - feature/*
+ schedule:
+ # GMT timezone.
+ - cron: '0 4 * * *'
+ workflow_dispatch:
+ inputs:
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: boolean
+ default: false
+
+jobs:
+ linux-x64:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: linux
+ nightly: ${{ github.event.inputs.nightly }}
+ linux-clang-3-9:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: linux-clang-3-9
+ nightly: ${{ github.event.inputs.nightly }}
+ linux-gcc-6-3:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: linux-gcc-6-3
+ nightly: ${{ github.event.inputs.nightly }}
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
new file mode 100644
index 0000000..d187bb5
--- /dev/null
+++ b/.github/workflows/main.yaml
@@ -0,0 +1,338 @@
+# Reusable Cobalt CI workflow.
+
+name: main
+
+on:
+ workflow_call:
+ inputs:
+ platform:
+ description: 'Cobalt platform.'
+ required: true
+ type: string
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: string
+ default: 'false'
+ run_api_leak_detector:
+ description: 'Whether to run the API leak detector.'
+ required: false
+ type: boolean
+ default: false
+ leak_manifest_filename:
+ description: 'Path to the leak manifest.'
+ required: false
+ type: string
+ default: ""
+
+# Global env vars.
+env:
+ REGISTRY: ghcr.io
+ IPV6_AVAILABLE: 0
+ LANG: en_US.UTF-8
+ IS_BUILDBOT_DOCKER: 1
+ IS_CI: 1
+ IS_DOCKER: 1
+ NINJA_STATUS: '[%e sec | %f/%t %u remaining | %c/sec | j%r]'
+ SCCACHE: 1
+ SCCACHE_GCS_BUCKET: cobalt-actions-sccache-linux
+ SCCACHE_GCS_OAUTH_URL: http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token
+ SCCACHE_GCS_RW_MODE: READ_WRITE
+ SCCACHE_IDLE_TIMEOUT: 0 # prevent sccache server from shutting down after long idle.
+ STARBOARD_TOOLCHAINS_DIR: /root/starboard-toolchains
+
+concurrency:
+ group: '${{ github.workflow }}-${{ github.event_name }}-${{ inputs.platform }} @ ${{ github.event.pull_request.number || github.sha }}'
+ cancel-in-progress: true
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+ # Retrieves configuration from json file.
+ initialize:
+ runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_PR_REPO_URL: ${{ github.event.pull_request.base.repo.url }}
+ GITHUB_EVENT_NUMBER: ${{ github.event.number }}
+ # All triggers except draft PRs, unless PR is labeled with runtest
+ if: |
+ github.event_name != 'pull_request' ||
+ (
+ github.event.pull_request.draft == false ||
+ (
+ github.event.action == 'labeled' &&
+ github.event.label.name == 'runtest'
+ )
+ )
+ steps:
+ - id: checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ persist-credentials: false
+ - name: Remove runtest if exists
+ if: github.event_name == 'pull_request'
+ continue-on-error: true # Ignore this step if we cannot remove the label.
+ run: |
+ set +e
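+ # Best-effort cleanup: remove the runtest label (if present) so it can be re-applied to trigger another run.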
+ curl \
+ -X DELETE \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${GITHUB_TOKEN}" \
+ ${GITHUB_PR_REPO_URL}/issues/${GITHUB_EVENT_NUMBER}/labels/runtest
+ shell: bash
+ - id: set-platforms
+ run: echo "platforms=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -c '.platforms')" >> $GITHUB_ENV
+ - id: set-includes
+ run: echo "includes=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -c '.includes')" >> $GITHUB_ENV
+ - id: set-on-device-test
+ run: echo "on_device_test=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -rc '.on_device_test')" >> $GITHUB_ENV
+ - id: set-on-device-test-attempts
+ run: echo "on_device_test_attempts=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -rc '.on_device_test.test_attempts // empty')" >> $GITHUB_ENV
+ - id: set-on-host-test
+ run: echo "on_host_test=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -rc '.on_host_test')" >> $GITHUB_ENV
+ - id: set-on-host-test-shards
+ run: echo "on_host_test_shards=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -c '.on_host_test_shards')" >> $GITHUB_ENV
+ - id: set-on-host-test-bootloader
+ run: echo "bootloader=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -rc '.bootloader')" >> $GITHUB_ENV
+ - id: set-docker-service
+ run: |
+ echo "docker_service=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -r '.docker_service')" >> $GITHUB_ENV
+ echo $platforms
+ outputs:
+ platforms: ${{ env.platforms }}
+ includes: ${{ env.includes }}
+ on_device_test: ${{ env.on_device_test }}
+ on_device_test_attempts: ${{ env.on_device_test_attempts }}
+ on_host_test: ${{ env.on_host_test }}
+ on_host_test_shards: ${{ env.on_host_test_shards }}
+ bootloader: ${{ env.bootloader }}
+ docker_service: ${{ env.docker_service }}
+
+ # Builds, tags, and pushes Cobalt docker build images to ghcr.
+ docker-build-image:
+ needs: [initialize]
+ runs-on: [self-hosted, linux, X64]
+ permissions:
+ packages: write
+ steps:
+ - name: Checkout files
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 2
+ persist-credentials: false
+ - name: Login to Docker Registry ${{env.REGISTRY}}
+ uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Build docker image
+ id: build-docker-image
+ uses: ./.github/actions/docker
+ with:
+ docker_service: ${{ needs.initialize.outputs.docker_service }}
+ docker_image: cobalt-${{ needs.initialize.outputs.docker_service }}
+ - name: Set Docker Tag Output
+ id: set-docker-tag-output
+ run: |
+ echo $DOCKER_TAG
+ echo "docker_tag=$DOCKER_TAG" >> $GITHUB_ENV
+ outputs:
+ docker_tag: ${{env.docker_tag}}
+
+ # Builds, tags, and pushes Cobalt unit test image to ghcr.
+ docker-unittest-image:
+ if: needs.initialize.outputs.on_host_test == 'true'
+ needs: [initialize]
+ permissions:
+ packages: write
+ runs-on: [self-hosted, linux, X64]
+ steps:
+ - name: Checkout files
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 2
+ persist-credentials: false
+ - name: Login to Docker Registry ${{env.REGISTRY}}
+ uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Build docker image
+ id: build-docker-image
+ uses: ./.github/actions/docker
+ with:
+ docker_service: linux-x64x11-unittest
+ docker_image: cobalt-linux-x64x11-unittest
+ - name: Set Docker Tag Output
+ id: set-docker-unittest-tag-output
+ run: |
+ echo $DOCKER_TAG
+ echo "docker_unittest_tag=$DOCKER_TAG" >> $GITHUB_ENV
+ outputs:
+ docker_unittest_tag: ${{env.docker_unittest_tag}}
+
+ # Runs builds.
+ build:
+ needs: [initialize, docker-build-image]
+ permissions: {}
+ runs-on: [self-hosted, linux, X64]
+ name: ${{matrix.name}}_${{matrix.config}}
+ strategy:
+ fail-fast: false
+ matrix:
+ platform: ${{ fromJson(needs.initialize.outputs.platforms) }}
+ include: ${{ fromJson(needs.initialize.outputs.includes) }}
+ config: [devel, debug, qa, gold]
+ container: ${{ needs.docker-build-image.outputs.docker_tag }}
+ env:
+      # We want the temp folder to be on tmpfs, which makes workloads faster.
+      # However, the dind container ends up with / mounted on an overlay
+      # filesystem, whereas /__w, which contains the Cobalt source code, is on tmpfs.
+ TMPDIR: /__w/_temp
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ # Use fetch depth of 0 to get full history for a valid build id.
+ fetch-depth: 0
+ persist-credentials: false
+ - name: GN
+ uses: ./.github/actions/gn
+ - name: Build Cobalt
+ uses: ./.github/actions/build
+ - name: Run API Leak Detector
+ uses: ./.github/actions/api_leak_detector
+ if: inputs.run_api_leak_detector
+ with:
+ relative_manifest_path: ${{ inputs.leak_manifest_filename }}
+ - name: Upload Nightly Artifacts
+ if: ${{ ( inputs.nightly == 'true' || github.event_name == 'schedule' ) && matrix.config != 'debug' }}
+ uses: ./.github/actions/upload_nightly_artifacts
+ - name: Upload On Host Test Artifacts
+ if: ${{ matrix.config == 'devel' && needs.initialize.outputs.on_host_test == 'true' }}
+ uses: ./.github/actions/upload_test_artifacts
+ with:
+ type: onhost
+ os: linux
+      # For some reason passing needs.initialize.outputs.bootloader as a parameter
+      # to the build action didn't work, so instead we set an env var.
+ - name: Set bootloader config
+ if: ${{ needs.initialize.outputs.bootloader != 'null' }}
+ run: echo "COBALT_BOOTLOADER=${{needs.initialize.outputs.bootloader}}" >> $GITHUB_ENV
+ # Build bootloader for on-host tests if necessary.
+ - name: Bootloader GN
+ if: ${{ needs.initialize.outputs.bootloader != 'null' && matrix.config == 'devel' }}
+ uses: ./.github/actions/gn
+ - name: Build Bootloader
+ if: ${{ needs.initialize.outputs.bootloader != 'null' && matrix.config == 'devel' }}
+ uses: ./.github/actions/build
+ - name: Upload Bootloader On Host Test Artifacts
+ if: ${{ needs.initialize.outputs.bootloader != 'null' && matrix.config == 'devel' && needs.initialize.outputs.on_host_test == 'true'}}
+ uses: ./.github/actions/upload_test_artifacts
+ with:
+ type: onhost
+ os: linux
+ - name: Upload On Device Test Artifacts
+ if: |
+ matrix.config == 'devel' &&
+ fromJSON(needs.initialize.outputs.on_device_test).enabled == true &&
+ (
+ github.event_name != 'pull_request' ||
+ contains(github.event.pull_request.labels.*.name, 'on_device')
+ )
+ uses: ./.github/actions/upload_test_artifacts
+ with:
+ type: ondevice
+ os: linux
+
+  # Runs on-device integration and unit tests.
+ on-device-test:
+ needs: [initialize, build]
+    # Run ODT when the on_device label is applied to a PR.
+    # Also run ODT on push and schedule unless explicitly disabled via repo vars.
+ if: |
+ fromJSON(needs.initialize.outputs.on_device_test).enabled == true && ((
+ github.event_name == 'pull_request' &&
+ contains(github.event.pull_request.labels.*.name, 'on_device') ) || ((
+ inputs.nightly == 'true' || github.event_name == 'schedule') &&
+ vars.RUN_ODT_TESTS_ON_NIGHTLY != 'False') ||
+ ( github.event_name == 'push' && vars.RUN_ODT_TESTS_ON_POSTSUBMIT != 'False' ) )
+ runs-on: [self-hosted, linux, X64]
+ name: ${{ matrix.name }}_on_device_${{ matrix.shard }}
+ container: ${{ needs.docker-unittest-image.outputs.docker_unittest_tag }}
+ permissions: {}
+ strategy:
+ fail-fast: false
+ matrix:
+ platform: ${{ fromJson(needs.initialize.outputs.platforms) }}
+ config: [devel]
+ shard: ${{ fromJson(needs.initialize.outputs.on_device_test).tests }}
+ include: ${{ fromJson(needs.initialize.outputs.includes) }}
+ env:
+ COBALT_BOOTLOADER: ${{ needs.initialize.outputs.bootloader }}
+ ON_DEVICE_TEST_ATTEMPTS: ${{ needs.initialize.outputs.on_device_test_attempts }}
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ persist-credentials: false
+ - name: Run Tests (${{ matrix.shard }})
+ uses: ./.github/actions/on_device_tests
+
+ # Runs on-host integration and unit tests.
+ on-host-test:
+ needs: [initialize, docker-unittest-image, build]
+ permissions: {}
+ if: needs.initialize.outputs.on_host_test == 'true'
+ runs-on: [self-hosted, linux, X64]
+ name: ${{matrix.name}}_${{matrix.shard}}_test
+ strategy:
+ fail-fast: false
+ matrix:
+ platform: ${{ fromJson(needs.initialize.outputs.platforms) }}
+ shard: ${{ fromJson(needs.initialize.outputs.on_host_test_shards) }}
+ config: [devel]
+ include: ${{ fromJson(needs.initialize.outputs.includes) }}
+ container: ${{ needs.docker-unittest-image.outputs.docker_unittest_tag }}
+ env:
+ DISPLAY: :99
+      # For some reason tests complain about HOME being set to /github/home
+      # with a permission denied error.
+ HOME: /root
+ COBALT_BOOTLOADER: ${{needs.initialize.outputs.bootloader}}
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ persist-credentials: false
+ - name: Run Tests
+ uses: ./.github/actions/on_host_test
+ with:
+ os: linux
+
+  # Gets unit test reports from on-host tests and prints them.
+ on-host-unit-test-report:
+ needs: [on-host-test]
+ permissions: {}
+ if: failure()
+ runs-on: ubuntu-latest
+ steps:
+ - name: Collect Unit Test Reports
+ uses: actions/download-artifact@v3
+ with:
+ name: unit-test-reports
+ path: unit-test-reports
+ - name: Print Unit Test Reports
+ run: |
+ for filename in ${GITHUB_WORKSPACE}/unit-test-reports/*; do
+ basename $filename
+ cat $filename
+ echo
+ done
diff --git a/.github/workflows/main_win.yaml b/.github/workflows/main_win.yaml
new file mode 100644
index 0000000..4827f3d
--- /dev/null
+++ b/.github/workflows/main_win.yaml
@@ -0,0 +1,183 @@
+# Reusable Cobalt CI workflow.
+
+name: main
+
+on:
+ workflow_call:
+ inputs:
+ platform:
+ description: 'Cobalt platform.'
+ required: true
+ type: string
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: string
+ default: 'false'
+
+# Global env vars.
+env:
+ REGISTRY: ghcr.io
+ IPV6_AVAILABLE: 0
+ LANG: en_US.UTF-8
+ IS_BUILDBOT_DOCKER: 1
+ #BUILD_ID_SERVER_URL:
+ IS_CI: 1
+ IS_DOCKER: 1
+ NINJA_STATUS: '[%e sec | %f/%t %u remaining | %c/sec | j%r]'
+ SCCACHE: 1
+ SCCACHE_GCS_BUCKET: cobalt-actions-sccache-windows
+ SCCACHE_GCS_OAUTH_URL: http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token
+ SCCACHE_GCS_RW_MODE: READ_WRITE
+ SCCACHE_IDLE_TIMEOUT: 0 # prevent sccache server from shutting down after long idle.
+ STARBOARD_TOOLCHAINS_DIR: /root/starboard-toolchains
+
+concurrency:
+ group: '${{ github.workflow }}-${{ github.event_name }}-${{ inputs.platform }} @ ${{ github.event.pull_request.number || github.sha }}'
+ cancel-in-progress: true
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  # Generates the build matrix based on a JSON configuration file.
+ initialize:
+ runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_PR_REPO_URL: ${{ github.event.pull_request.base.repo.url }}
+ GITHUB_EVENT_NUMBER: ${{ github.event.number }}
+ # All triggers except draft PRs, unless PR is labeled with runtest
+ if: |
+ github.event_name != 'pull_request' ||
+ (
+ github.event.pull_request.draft == false ||
+ (
+ github.event.action == 'labeled' &&
+ github.event.label.name == 'runtest'
+ )
+ )
+ steps:
+ - id: Checkout
+ uses: kaidokert/checkout@v3.5.999 # Temporary version
+ with:
+ fetch-depth: 1
+ persist-credentials: false
+      - name: Remove runtest label if present
+ if: github.event_name == 'pull_request'
+ continue-on-error: true # Ignore this step if we cannot remove the label.
+ run: |
+ set +e
+ curl \
+ -X DELETE \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${GITHUB_TOKEN}" \
+ ${GITHUB_PR_REPO_URL}/issues/${GITHUB_EVENT_NUMBER}/labels/runtest
+ shell: bash
+ - id: set-platforms
+ run: echo "platforms=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -c '.platforms')" >> $GITHUB_ENV
+ - id: set-includes
+ run: echo "includes=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -c '.includes')" >> $GITHUB_ENV
+ - id: set-on-device-test
+ run: echo "on_device_test=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -rc '.on_device_test.enabled')" >> $GITHUB_ENV
+ - id: set-on-host-test
+ run: echo "on_host_test=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -rc '.on_host_test')" >> $GITHUB_ENV
+ - id: set-on-host-test-shards
+ run: echo "on_host_test_shards=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -c '.on_host_test_shards')" >> $GITHUB_ENV
+ - id: set-docker-service
+ run: echo "docker_service=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -rc '.docker_service')" >> $GITHUB_ENV
+ - id: set-docker-runner-service
+ run: echo "docker_runner_service=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -rc '.docker_runner_service')" >> $GITHUB_ENV
+ outputs:
+ platforms: ${{ env.platforms }}
+ includes: ${{ env.includes }}
+ on_device_test: ${{ env.on_device_test }}
+ on_host_test: ${{ env.on_host_test }}
+ on_host_test_shards: ${{ env.on_host_test_shards }}
+ docker_service: ${{ env.docker_service }}
+ docker_runner_service: ${{ env.docker_runner_service }}
+  # Builds Windows docker images.
+ build-docker-image:
+ needs: [initialize]
+ permissions:
+ packages: write
+ runs-on: windows-2019
+ steps:
+ - name: Checkout files
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 2
+ persist-credentials: false
+ - name: Login to Docker Registry ${{env.REGISTRY}}
+ uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Build docker image
+ id: build-docker-image
+ uses: ./.github/actions/docker_win
+ with:
+ service: ${{ needs.initialize.outputs.docker_service }}
+ - name: Build runner docker image
+ id: build-runner-docker-image
+ uses: ./.github/actions/docker_win
+ with:
+ service: ${{ needs.initialize.outputs.docker_runner_service }}
+ # Runs builds.
+ build:
+ needs: [initialize]
+ permissions: {}
+ runs-on: [self-hosted, X64, Windows]
+ name: ${{matrix.name}}_${{matrix.config}}
+ strategy:
+ fail-fast: false
+ matrix:
+ platform: ${{ fromJson(needs.initialize.outputs.platforms) }}
+ include: ${{ fromJson(needs.initialize.outputs.includes) }}
+ config: [devel, debug, qa, gold]
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ # Use fetch depth of 0 to get full history for a valid build id.
+ fetch-depth: 0
+ persist-credentials: false
+ - name: GN
+ uses: ./.github/actions/gn
+ - name: Build Cobalt
+ uses: ./.github/actions/build
+ - name: Upload Nightly Artifacts
+ if: ${{ ( inputs.nightly == 'true' || github.event_name == 'schedule' ) && matrix.config != 'debug' }}
+ uses: ./.github/actions/upload_nightly_artifacts
+ - name: Upload On Host Test Artifacts
+ if: ${{ matrix.config == 'devel' && needs.initialize.outputs.on_host_test == 'true' }}
+ uses: ./.github/actions/upload_test_artifacts
+ with:
+ type: onhost
+ os: windows
+
+  # Runs on-host unit and integration tests.
+ on-host-test:
+ needs: [initialize, build]
+ permissions: {}
+ if: needs.initialize.outputs.on_host_test == 'true'
+ runs-on: [self-hosted, Windows, X64]
+ name: ${{matrix.name}}_${{matrix.shard}}_test
+ strategy:
+ fail-fast: false
+ matrix:
+ platform: ${{ fromJson(needs.initialize.outputs.platforms) }}
+ shard: ${{ fromJson(needs.initialize.outputs.on_host_test_shards) }}
+ config: [devel]
+ include: ${{ fromJson(needs.initialize.outputs.includes) }}
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ persist-credentials: false
+ - name: Run Tests
+ uses: ./.github/actions/on_host_test
+ with:
+ os: windows
diff --git a/.github/workflows/manual-cherry-pick.yaml b/.github/workflows/manual-cherry-pick.yaml
new file mode 100644
index 0000000..d4bca26
--- /dev/null
+++ b/.github/workflows/manual-cherry-pick.yaml
@@ -0,0 +1,72 @@
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Usage: Go to
+# https://github.com/youtube/cobalt/actions/workflows/manual-cherry-pick.yaml
+# and click "Run Workflow." Leave "Use Workflow From" set to "main", then
+# enter the release branch name, paste the commit hash to cherry-pick, and
+# click Run. A PR will be created.
+
+name: Release Branch Cherrypick
+on:
+ workflow_dispatch:
+ inputs:
+ # We use this instead of the "run on branch" argument because GitHub looks
+ # on that branch for a workflow.yml file, and we'd have to cherry-pick
+ # this file into those branches.
+ release_branch:
+ description: 'Release branch name (e.g. 23.lts.1+)'
+ required: true
+ type: string
+ git_commit:
+ description: 'Git commit to cherry-pick'
+ required: true
+ type: string
+
+jobs:
+ cherrypick:
+ name: Cherrypick to ${{ github.event.inputs.release_branch}} - ${{ github.event.inputs.git_commit }}
+ runs-on: ubuntu-latest
+ env:
+ ACCESS_TOKEN: ${{ secrets.CHERRY_PICK_TOKEN }}
+ RELEASE_BRANCH: ${{ github.event.inputs.release_branch }}
+ COMMIT_HASH: ${{ github.event.inputs.git_commit }}
+ REPOSITORY: ${{ github.repository }}
+ GITHUB_REF: ${{ github.ref }}
+ steps:
+ - name: Checkout code
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ ref: ${{ env.RELEASE_BRANCH }}
+ persist-credentials: false
+      - name: Cherry-pick commit and get info for PR formatting
+ id: cherrypick
+ run: |
+ git config --global user.name "GitHub Release Automation"
+ git config --global user.email "github@google.com"
+ git fetch origin $GITHUB_REF
+ git cherry-pick -x $COMMIT_HASH
+ echo "SHORTSHA=$(git log -1 $COMMIT_HASH --format="%h")" >> "$GITHUB_OUTPUT"
+ echo "TITLE=$(git log -1 $COMMIT_HASH --format="%s")" >> "$GITHUB_OUTPUT"
+ - name: Create Pull Request with changes
+ uses: peter-evans/create-pull-request@2b011faafdcbc9ceb11414d64d0573f37c774b04 # v4.2.3
+ with:
+ title: '${{ env.RELEASE_BRANCH }} cherry-pick: ${{ steps.cherrypick.outputs.SHORTSHA }} "${{ steps.cherrypick.outputs.TITLE }}"'
+ committer: GitHub Release Automation <github@google.com>
+ token: ${{ secrets.CHERRY_PICK_TOKEN }}
+ base: ${{ env.RELEASE_BRANCH }}
+ branch: ${{ env.RELEASE_BRANCH }}-${{ steps.cherrypick.outputs.SHORTSHA }}
+ reviewers: ${{ github.actor }}
+ body: |
+ Refer to the original commit: https://github.com/${{ github.repository }}/commit/${{ github.event.inputs.git_commit }}
diff --git a/.github/workflows/nightly_trigger.yaml b/.github/workflows/nightly_trigger.yaml
new file mode 100644
index 0000000..2cb01b6
--- /dev/null
+++ b/.github/workflows/nightly_trigger.yaml
@@ -0,0 +1,141 @@
+name: nightly_trigger
+
+on:
+ schedule:
+ # GMT timezone.
+ - cron: '30 4 * * *'
+ workflow_dispatch:
+
+jobs:
+ trigger_23:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ ref: 23.lts.1+
+ persist-credentials: false
+ - name: Trigger Nightly
+ run: |
+ set -x
+ gh workflow run android_23.lts.1+ --ref 23.lts.1+ -f nightly=true
+ gh workflow run evergreen_23.lts.1+ --ref 23.lts.1+ -f nightly=true
+ gh workflow run linux_23.lts.1+ --ref 23.lts.1+ -f nightly=true
+ gh workflow run raspi-2_23.lts.1+ --ref 23.lts.1+ -f nightly=true
+ gh workflow run win32_23.lts.1+ --ref 23.lts.1+ -f nightly=true
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ trigger_22:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ ref: 22.lts.1+
+ persist-credentials: false
+ - name: Trigger Nightly
+ run: |
+ set -x
+ gh workflow run android_22.lts.1+ --ref 22.lts.1+ -f nightly=true
+ gh workflow run evergreen_22.lts.1+ --ref 22.lts.1+ -f nightly=true
+ gh workflow run linux_22.lts.1+ --ref 22.lts.1+ -f nightly=true
+ gh workflow run raspi-2_22.lts.1+ --ref 22.lts.1+ -f nightly=true
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ trigger_21:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ ref: 21.lts.1+
+ persist-credentials: false
+ - name: Trigger Nightly
+ run: |
+ set -x
+ gh workflow run evergreen_21.lts.1+ --ref 21.lts.1+ -f nightly=true
+ gh workflow run linux_21.lts.1+ --ref 21.lts.1+ -f nightly=true
+ gh workflow run raspi-2_21.lts.1+ --ref 21.lts.1+ -f nightly=true
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ trigger_20:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ ref: 20.lts.1+
+ persist-credentials: false
+ - name: Trigger Nightly
+ run: |
+ set -x
+ gh workflow run linux_20.lts.1+ --ref 20.lts.1+ -f nightly=true
+ gh workflow run raspi-2_20.lts.1+ --ref 20.lts.1+ -f nightly=true
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ trigger_19:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ ref: 19.lts.1+
+ persist-credentials: false
+ - name: Trigger Nightly
+ run: |
+ set -x
+ gh workflow run linux_19.lts.1+ --ref 19.lts.1+ -f nightly=true
+ gh workflow run raspi-2_19.lts.1+ --ref 19.lts.1+ -f nightly=true
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ trigger_rc_11:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ ref: rc_11
+ persist-credentials: false
+ - name: Trigger Nightly
+ run: |
+ set -x
+ gh workflow run linux_rc_11 --ref rc_11 -f nightly=true
+ gh workflow run raspi-2_rc_11 --ref rc_11 -f nightly=true
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ trigger_cobalt_9:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ ref: COBALT_9
+ persist-credentials: false
+ - name: Trigger Nightly
+ run: |
+ set -x
+ gh workflow run linux_COBALT_9 --ref COBALT_9 -f nightly=true
+ gh workflow run raspi-2_COBALT_9 --ref COBALT_9 -f nightly=true
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/nightly_trigger_24.lts.1+.yaml b/.github/workflows/nightly_trigger_24.lts.1+.yaml
new file mode 100644
index 0000000..f72b625
--- /dev/null
+++ b/.github/workflows/nightly_trigger_24.lts.1+.yaml
@@ -0,0 +1,30 @@
+name: nightly_trigger_24.lts.1+
+
+on:
+ schedule:
+ # GMT timezone.
+ - cron: '30 5 * * *'
+ workflow_dispatch:
+
+jobs:
+ trigger_24:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ ref: 24.lts.1+
+ persist-credentials: false
+ - name: Trigger Nightly
+ run: |
+ set -x
+ gh workflow run android_24.lts.1+ --ref 24.lts.1+ -f nightly=true
+ gh workflow run evergreen_24.lts.1+ --ref 24.lts.1+ -f nightly=true
+ gh workflow run linux_24.lts.1+ --ref 24.lts.1+ -f nightly=true
+ gh workflow run raspi-2_24.lts.1+ --ref 24.lts.1+ -f nightly=true
+ gh workflow run win32_24.lts.1+ --ref 24.lts.1+ -f nightly=true
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/pr_badges.yaml b/.github/workflows/pr_badges.yaml
new file mode 100644
index 0000000..3fcd93e
--- /dev/null
+++ b/.github/workflows/pr_badges.yaml
@@ -0,0 +1,69 @@
+name: PR badges
+
+on:
+ pull_request_target:
+ branches:
+ - 'feature/*'
+ - 'main'
+ - '24.lts.1\+'
+ - '23.lts.1\+'
+ - '22.lts.1\+'
+ - '21.lts.1\+'
+ - '20.lts.1\+'
+ - '19.lts.1\+'
+ - 'rc_11'
+ - 'COBALT_9'
+
+concurrency:
+ group: '${{ github.workflow }}-${{ github.event_name }}-${{ inputs.platform }} @ ${{ github.event.pull_request.number || github.sha }}'
+ cancel-in-progress: true
+
+permissions:
+ pull-requests: write
+
+jobs:
+ comment:
+ runs-on: ubuntu-latest
+ env:
+ GITHUB_SERVER_URL: ${{github.server_url}}
+ GITHUB_REPO: ${{github.repository}}
+ GITHUB_HEAD_REF: ${{ github.head_ref }}
+ steps:
+ - uses: actions/github-script@v6
+ with:
+ script: |
+ // Get env vars.
+ const { GITHUB_SERVER_URL, GITHUB_REPO, GITHUB_HEAD_REF } = process.env
+ // Get the existing comments.
+ const {data: comments} = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.payload.number,
+ })
+
+ // Find any comment already made by the bot.
+ const botComment = comments.find(comment => {
+ return comment.user.type === 'Bot' && comment.body.includes('Build Status')
+ })
+ const workflows = ["lint", "android", "evergreen", "linux", "raspi-2", "stub", "win32"]
+ var commentBody = `
+ ## Build Status
+ | Workflow | Status |
+ | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+ `
+ for (let i = 0; i < workflows.length; i++) {
+            commentBody += `| ${workflows[i]} | [![${workflows[i]}](${GITHUB_SERVER_URL}/${GITHUB_REPO}/actions/workflows/${workflows[i]}.yaml/badge.svg?branch=${GITHUB_HEAD_REF})](${GITHUB_SERVER_URL}/${GITHUB_REPO}/actions/workflows/${workflows[i]}.yaml?query=branch%3A${GITHUB_HEAD_REF}) |\n`
+ }
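+          // The loop above yields one badge row per workflow; assuming a head
+          // branch named "my-branch", a row would look roughly like:
+          // | linux | [![linux](https://github.com/youtube/cobalt/actions/workflows/linux.yaml/badge.svg?branch=my-branch)](https://github.com/youtube/cobalt/actions/workflows/linux.yaml?query=branch%3Amy-branch) |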
+ if (botComment) {
+ await github.rest.issues.deleteComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: botComment.id,
+ })
+ }
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.payload.number,
+ body: commentBody
+ })
diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml
new file mode 100644
index 0000000..787f056
--- /dev/null
+++ b/.github/workflows/pytest.yaml
@@ -0,0 +1,40 @@
+name: python-tests
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+ - feature/*
+
+concurrency:
+ group: '${{ github.workflow }}-${{ github.event_name }}-${{ inputs.platform }} @ ${{ github.event.pull_request.number || github.sha }}'
+ cancel-in-progress: true
+
+permissions: {}
+
+jobs:
+ python-test:
+ strategy:
+ matrix:
+ os: [ubuntu-latest, windows-latest]
+ python-version: ['3.7', '3.11']
+ fail-fast: false
+ runs-on: ${{ matrix.os }}
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+ persist-credentials: false
+ - name: Setup Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: 'pip'
+ - name: Install Pip Packages
+ run: pip install --require-hashes --no-deps -r ${{ github.workspace }}/docker/pytest/requirements.txt
+ - name: Run Tests
+ run: coverage run -m pytest
+ - name: Coverage Report
+ run: coverage report -m
diff --git a/.github/workflows/raspi-2.yaml b/.github/workflows/raspi-2.yaml
new file mode 100644
index 0000000..62d0708
--- /dev/null
+++ b/.github/workflows/raspi-2.yaml
@@ -0,0 +1,40 @@
+name: raspi-2
+
+on:
+ pull_request:
+ types: [ready_for_review, opened, reopened, synchronize, labeled]
+ branches:
+ - main
+ - feature/*
+ push:
+ branches:
+ - main
+ - feature/*
+ schedule:
+ # GMT timezone.
+ - cron: '0 4 * * *'
+ workflow_dispatch:
+ inputs:
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: boolean
+ default: false
+
+jobs:
+ raspi-2:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: raspi-2
+ nightly: ${{ github.event.inputs.nightly }}
+ raspi-2-skia:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: raspi-2-skia
+ nightly: ${{ github.event.inputs.nightly }}
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
new file mode 100644
index 0000000..4514e18
--- /dev/null
+++ b/.github/workflows/scorecards.yml
@@ -0,0 +1,58 @@
+# This workflow uses actions that are not certified by GitHub. They are provided
+# by a third-party and are governed by separate terms of service, privacy
+# policy, and support documentation.
+
+name: Scorecards supply-chain security
+on:
+ # For Branch-Protection check. Only the default branch is supported. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
+ branch_protection_rule:
+ # To guarantee Maintained check is occasionally updated. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
+ schedule:
+ - cron: '18 14 * * 1'
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+
+# Declare default permissions as read only.
+permissions: read-all
+
+jobs:
+ analysis:
+ name: Scorecards analysis
+ runs-on: ubuntu-latest
+ permissions:
+ # Needed to upload the results to code-scanning dashboard.
+ security-events: write
+ # Needed to publish results and get a badge (see publish_results below).
+ id-token: write
+
+ steps:
+ - name: "Checkout code"
+ uses: actions/checkout@v3
+ with:
+ persist-credentials: false
+
+ - name: "Run analysis"
+ uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2
+ with:
+ results_file: results.sarif
+ results_format: sarif
+ publish_results: true
+
+ # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
+ # format to the repository Actions tab.
+ - name: "Upload artifact"
+ uses: actions/upload-artifact@v3.1.1
+ with:
+ name: SARIF file
+ path: results.sarif
+ retention-days: 5
+
+ # Upload the results to GitHub's code scanning dashboard.
+ - name: "Upload to code-scanning"
+ uses: github/codeql-action/upload-sarif@v2.1.37
+ with:
+ sarif_file: results.sarif
diff --git a/.github/workflows/stub.yaml b/.github/workflows/stub.yaml
new file mode 100644
index 0000000..c105a59
--- /dev/null
+++ b/.github/workflows/stub.yaml
@@ -0,0 +1,31 @@
+name: stub
+
+on:
+ pull_request:
+ types: [ready_for_review, opened, reopened, synchronize, labeled]
+ branches:
+ - main
+ - feature/*
+ push:
+ branches:
+ - main
+ - feature/*
+ workflow_dispatch:
+ inputs:
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: boolean
+ default: false
+
+jobs:
+ stub:
+ uses: ./.github/workflows/main.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: stub
+ nightly: 'false'
+ run_api_leak_detector: true
+ leak_manifest_filename: "gn_built_docker_debian10_manifest"
diff --git a/.github/workflows/win32.yaml b/.github/workflows/win32.yaml
new file mode 100644
index 0000000..449a9d2
--- /dev/null
+++ b/.github/workflows/win32.yaml
@@ -0,0 +1,32 @@
+name: win32
+
+on:
+ pull_request:
+ types: [ready_for_review, opened, reopened, synchronize, labeled]
+ branches:
+ - main
+ - feature/*
+ push:
+ branches:
+ - main
+ - feature/*
+ schedule:
+    # GMT timezone.
+ - cron: '0 4 * * *'
+ workflow_dispatch:
+ inputs:
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: boolean
+ default: false
+
+jobs:
+ win32:
+ uses: ./.github/workflows/main_win.yaml
+ permissions:
+ packages: write
+ pull-requests: write
+ with:
+ platform: win32
+ nightly: ${{ github.event.inputs.nightly }}
diff --git a/.github/workflows/workflow_trigger.yaml b/.github/workflows/workflow_trigger.yaml
new file mode 100644
index 0000000..c4122d8
--- /dev/null
+++ b/.github/workflows/workflow_trigger.yaml
@@ -0,0 +1,49 @@
+name: workflow_trigger
+
+on:
+ workflow_dispatch:
+ inputs:
+ branch:
+ type: choice
+ description: Branch
+ options:
+ - '24.lts.1+'
+ - '23.lts.1+'
+ - '22.lts.1+'
+ - '21.lts.1+'
+ - '20.lts.1+'
+ - '19.lts.1+'
+ - 'rc_11'
+ - 'COBALT_9'
+ workflow:
+ type: choice
+ description: Workflow name
+ options:
+ - 'android'
+ - 'evergreen'
+ - 'linux'
+ - 'raspi'
+ - 'win32'
+ nightly:
+ description: 'Nightly workflow.'
+ required: true
+ type: boolean
+ default: false
+
+jobs:
+ trigger:
+ permissions:
+ actions: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: kaidokert/checkout@v3.5.999
+ with:
+ fetch-depth: 1
+          ref: ${{ github.event.inputs.branch }}
+ - name: Trigger Workflow
+ run: |
+ set -x
+ gh workflow run ${{ github.event.inputs.workflow }}_${{ github.event.inputs.branch }} --ref ${{ github.event.inputs.branch }} -f nightly=${{ github.event.inputs.nightly }}
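+        # For example, with workflow=linux, branch=24.lts.1+ and nightly=true,
+        # this expands to: gh workflow run linux_24.lts.1+ --ref 24.lts.1+ -f nightly=true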
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cde10f9..c4bd214 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,8 +11,16 @@
base|
build|
buildtools|
+ components/crash/core/common|
+ components/metrics|
+ components/metrics_services_manager|
+ components/ukm|
+ components/variations|
+ components/version_info|
crypto|
+ extensions/buildflags|
net|
+ internal/starboard/shared/playstation/glimp/shaders|
testing|
third_party|
tools/gyp|
@@ -20,6 +28,8 @@
)/
|
components/update_client/((?!cobalt).)*$
+ |
+ .*\.sig$
)
repos:
@@ -43,10 +53,12 @@
--ignore-words-list, atleast]
exclude: |
(?x)^(
- starboard/[^/]+/i18n/|
+ (internal/)?starboard/[^/]+/i18n/|
cobalt/content/licenses/|
cobalt/fetch/embedded_scripts|
- cobalt/loader/cors_preflight.cc
+ cobalt/loader/cors_preflight.cc|
+ internal/cobalt/browser/splash_screen/youtube_splash_screen.html|
+ internal/starboard/shared/playstation/storage_internal.cc
)
- repo: local
@@ -73,7 +85,7 @@
exclude: |
(?x)(
^cobalt/bindings/(templates|generated)/|
- ^starboard/shared/uikit/.*\.h$
+ ^internal/starboard/shared/uikit/.*\.h$
)
- id: yapf
name: yapf
diff --git a/BUILD_STATUS.md b/BUILD_STATUS.md
index c209877..28ea016 100644
--- a/BUILD_STATUS.md
+++ b/BUILD_STATUS.md
@@ -1,21 +1,22 @@
# Build Status
-| Workflow | Main | 23.lts.1+ | 22.lts.1+ | 21.lts.1+ | 20.lts.1+ | 19.lts.1+ | RC11 | COBALT 9 |
-| --------- | ---- | --------- | --------- | --------- | --------- | --------- | ---- | ---------|
-| Lint | [](https://github.com/youtube/cobalt/actions/workflows/lint.yaml?query=event%3Apush+branch%3Amain) | | | | | | | |
-| Android | [](https://github.com/youtube/cobalt/actions/workflows/android.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/android_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/android_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Apush) | | | | | |
-| Evergreen | [](https://github.com/youtube/cobalt/actions/workflows/evergreen.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_21.lts.1+.yaml) | | | | |
-| Linux | [](https://github.com/youtube/cobalt/actions/workflows/linux.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/linux_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/linux_21.lts.1+.yaml) | [](https://github.com/youtube/cobalt/actions/workflows/linux_20.lts.1+.yaml?query=branch%3A20.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/linux_19.lts.1+.yaml?query=branch%3A19.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/linux_rc_11.yaml?query=event%3Apush+branch%3Arc_11) | [](https://github.com/youtube/cobalt/actions/workflows/linux_COBALT_9.yaml?query=event%3Apush+branch%3ACOBALT_9) |
-| raspi-2 | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_21.lts.1+.yaml) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_20.lts.1+.yaml?query=branch%3A20.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_19.lts.1+.yaml?query=branch%3A19.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_rc_11.yaml?query=event%3Apush+branch%3Arc_11) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_COBALT_9.yaml?query=event%3Apush+branch%3ACOBALT_9) |
-| stub | [](https://github.com/youtube/cobalt/actions/workflows/stub.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/stub_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | | | | | | |
-| Win32 | [](https://github.com/youtube/cobalt/actions/workflows/win32.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/win32_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | | | | | | |
-
+| Workflow | Main | 24.lts.1+ | 23.lts.1+ | 22.lts.1+ | 21.lts.1+ | 20.lts.1+ | 19.lts.1+ | RC11 | COBALT 9 |
+| --------- | ---- | --------- | --------- | --------- | --------- | --------- | --------- | ---- | ---------|
+| Lint | [](https://github.com/youtube/cobalt/actions/workflows/lint.yaml?query=event%3Apush+branch%3Amain) | | | | | | | | |
+| Android | [](https://github.com/youtube/cobalt/actions/workflows/android.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/android_24.lts.1+.yaml?query=event%3Apush+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/android_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/android_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Apush) | | | | | |
+| Evergreen | [](https://github.com/youtube/cobalt/actions/workflows/evergreen.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_24.lts.1+.yaml?query=event%3Apush+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_21.lts.1+.yaml?query=branch%3A21.lts.1%2B+event%3Apush) | | | | |
+| Linux | [](https://github.com/youtube/cobalt/actions/workflows/linux.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/linux_24.lts.1+.yaml?query=event%3Apush+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/linux_21.lts.1+.yaml?query=branch%3A21.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/linux_20.lts.1+.yaml?query=branch%3A20.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/linux_19.lts.1+.yaml?query=branch%3A19.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/linux_rc_11.yaml?query=event%3Apush+branch%3Arc_11) | [](https://github.com/youtube/cobalt/actions/workflows/linux_COBALT_9.yaml?query=event%3Apush+branch%3ACOBALT_9) |
+| Raspi-2 | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_24.lts.1+.yaml?query=event%3Apush+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_21.lts.1+.yaml?query=branch%3A21.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_20.lts.1+.yaml?query=branch%3A20.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_19.lts.1+.yaml?query=branch%3A19.lts.1%2B+event%3Apush) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_rc_11.yaml?query=event%3Apush+branch%3Arc_11) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_COBALT_9.yaml?query=event%3Apush+branch%3ACOBALT_9) |
+| Stub | [](https://github.com/youtube/cobalt/actions/workflows/stub.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/stub_24.lts.1+.yaml?query=event%3Apush+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/stub_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | | | | | | |
+| Win32 | [](https://github.com/youtube/cobalt/actions/workflows/win32.yaml?query=event%3Apush+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/win32_24.lts.1+.yaml?query=event%3Apush+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/win32_23.lts.1+.yaml?query=event%3Apush+branch%3A23.lts.1%2B) | | | | | | |
+| Python | [](https://github.com/youtube/cobalt/actions/workflows/pytest.yaml?query=event%3Apush+branch%3Amain) | | | | | | | | |
+| Java | [](https://github.com/youtube/cobalt/actions/workflows/gradle.yaml?query=event%3Apush+branch%3Amain) | | | | | | | | |
# Nightly builds
-| Workflow | main | 23.lts.1+ | 22.lts.1+ | 21.lts.1+ | 20.lts.1+ | 19.lts.1+ | RC11 | COBALT 9 |
-| --------- | ---- | --------- | --------- | --------- | --------- | --------- | ---- | ---------|
-| Android | [](https://github.com/youtube/cobalt/actions/workflows/android.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/android_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/android_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Aworkflow_dispatch) | | | | | |
-| Evergreen | [](https://github.com/youtube/cobalt/actions/workflows/evergreen.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Aworkflow_dispatch) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_21.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A21.lts.1%2B) | | | | |
-| Linux | [](https://github.com/youtube/cobalt/actions/workflows/linux.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/linux_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Aworkflow_dispatch) | [](https://github.com/youtube/cobalt/actions/workflows/linux_21.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A21.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_20.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A20.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_19.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A19.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_rc_11.yaml?query=event%3Aworkflow_dispatch+branch%3Arc_11) | [](https://github.com/youtube/cobalt/actions/workflows/linux_COBALT_9.yaml?query=event%3Aworkflow_dispatch+branch%3ACOBALT_9) |
-| raspi-2 | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Aworkflow_dispatch) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_21.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A21.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_20.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A20.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_19.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A19.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_rc_11.yaml?query=event%3Aworkflow_dispatch+branch%3Arc_11) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_COBALT_9.yaml?query=event%3Aworkflow_dispatch+branch%3ACOBALT_9) |
-| Win32 | [](https://github.com/youtube/cobalt/actions/workflows/win32.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/win32_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | | | | | | |
+| Workflow | main | 24.lts.1+ | 23.lts.1+ | 22.lts.1+ | 21.lts.1+ | 20.lts.1+ | 19.lts.1+ | RC11 | COBALT 9 |
+| --------- | ---- | --------- | --------- | --------- | --------- | --------- | --------- | ---- | ---------|
+| Android | [](https://github.com/youtube/cobalt/actions/workflows/android.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/android_24.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/android_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/android_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Aworkflow_dispatch) | | | | | |
+| Evergreen | [](https://github.com/youtube/cobalt/actions/workflows/evergreen.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_24.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Aworkflow_dispatch) | [](https://github.com/youtube/cobalt/actions/workflows/evergreen_21.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A21.lts.1%2B) | | | | |
+| Linux | [](https://github.com/youtube/cobalt/actions/workflows/linux.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/linux_24.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Aworkflow_dispatch) | [](https://github.com/youtube/cobalt/actions/workflows/linux_21.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A21.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_20.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A20.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_19.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A19.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/linux_rc_11.yaml?query=event%3Aworkflow_dispatch+branch%3Arc_11) | [](https://github.com/youtube/cobalt/actions/workflows/linux_COBALT_9.yaml?query=event%3Aworkflow_dispatch+branch%3ACOBALT_9) |
+| Raspi-2 | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_24.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_22.lts.1+.yaml?query=branch%3A22.lts.1%2B+event%3Aworkflow_dispatch) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_21.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A21.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_20.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A20.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_19.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A19.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_rc_11.yaml?query=event%3Aworkflow_dispatch+branch%3Arc_11) | [](https://github.com/youtube/cobalt/actions/workflows/raspi-2_COBALT_9.yaml?query=event%3Aworkflow_dispatch+branch%3ACOBALT_9) |
+| Win32 | [](https://github.com/youtube/cobalt/actions/workflows/win32.yaml?query=event%3Aschedule+branch%3Amain) | [](https://github.com/youtube/cobalt/actions/workflows/win32_24.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A24.lts.1%2B) | [](https://github.com/youtube/cobalt/actions/workflows/win32_23.lts.1+.yaml?query=event%3Aworkflow_dispatch+branch%3A23.lts.1%2B) | | | | | | |
diff --git a/README.md b/README.md
index 1ffff01..fcfe4a6 100644
--- a/README.md
+++ b/README.md
@@ -173,7 +173,8 @@
* [Linux](cobalt/site/docs/development/setup-linux.md)
* [Raspi](cobalt/site/docs/development/setup-raspi.md)
* [Android](cobalt/site/docs/development/setup-android.md)
-
+ * [Docker](cobalt/site/docs/development/setup-docker.md)
+ * [RDK](cobalt/site/docs/development/setup-rdk.md)
## Build Types
diff --git a/base/BUILD.gn b/base/BUILD.gn
index 93f6256..25759ac 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -1215,8 +1215,6 @@
"memory/platform_shared_memory_region.h",
"memory/protected_memory_cfi.h",
"memory/protected_memory_win.cc",
- "memory/shared_memory_handle.cc",
- "memory/shared_memory_handle.h",
"memory/shared_memory_helper.cc",
"memory/shared_memory_helper.h",
"memory/shared_memory_mapping.cc",
@@ -1239,12 +1237,6 @@
"message_loop/message_pump_mac.mm",
"message_loop/message_pump_win.cc",
"message_loop/message_pump_win.h",
- "metrics/field_trial.cc",
- "metrics/field_trial.h",
- "metrics/field_trial_param_associator.cc",
- "metrics/field_trial_param_associator.h",
- "metrics/field_trial_params.cc",
- "metrics/field_trial_params.h",
"native_library.cc",
"native_library.h",
"native_library_ios.mm",
diff --git a/base/codereview.settings b/base/codereview.settings
new file mode 100644
index 0000000..a4b341a
--- /dev/null
+++ b/base/codereview.settings
@@ -0,0 +1,4 @@
+# This file is used by gcl to get repository specific information.
+GERRIT_HOST: lbshell-internal-review.googlesource.com
+GERRIT_AUTODETECT_BRANCH: true
+CODE_REVIEW_SERVER: lbshell-internal-review.googlesource.com
diff --git a/base/feature_list.cc b/base/feature_list.cc
index d8e00ac..5da3379 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -98,7 +98,6 @@
initialized_from_command_line_ = true;
}
-#if !defined(STARBOARD)
void FeatureList::InitializeFromSharedMemory(
PersistentMemoryAllocator* allocator) {
DCHECK(!initialized_);
@@ -115,10 +114,13 @@
continue;
FieldTrial* trial = FieldTrialList::Find(trial_name.as_string());
+#if defined(STARBOARD)
+ RegisterOverride(feature_name, override_state);
+#else
RegisterOverride(feature_name, override_state, trial);
+#endif
}
}
-#endif // !defined(STARBOARD)
bool FeatureList::IsFeatureOverriddenFromCommandLine(
const std::string& feature_name,
@@ -212,7 +214,6 @@
return g_feature_list_instance->IsFeatureEnabled(feature);
}
-#if !defined(STARBOARD)
// static
FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
if (!g_feature_list_instance) {
@@ -221,7 +222,6 @@
}
return g_feature_list_instance->GetAssociatedFieldTrial(feature);
}
-#endif // !defined(STARBOARD)
// static
std::vector<base::StringPiece> FeatureList::SplitFeatureListString(
@@ -334,7 +334,6 @@
return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
}
-#if !defined(STARBOARD)
FieldTrial* FeatureList::GetAssociatedFieldTrial(const Feature& feature) {
DCHECK(initialized_);
DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
@@ -348,7 +347,6 @@
return nullptr;
}
-#endif // !defined(STARBOARD)
void FeatureList::RegisterOverridesFromCommandLine(
const std::string& feature_list,
@@ -389,7 +387,7 @@
// one already exists for the key. Thus, only the first override for a given
// feature name takes effect.
overrides_.insert(std::make_pair(feature_name.as_string(),
- OverrideEntry(overridden_state)));
+ OverrideEntry(overridden_state, nullptr)));
}
#else // defined(STARBOARD)
void FeatureList::RegisterOverride(StringPiece feature_name,
@@ -473,15 +471,10 @@
return it->second == &feature;
}
-#if defined(STARBOARD)
-FeatureList::OverrideEntry::OverrideEntry(OverrideState overridden_state)
- : overridden_state(overridden_state) {}
-#else
FeatureList::OverrideEntry::OverrideEntry(OverrideState overridden_state,
FieldTrial* field_trial)
: overridden_state(overridden_state),
field_trial(field_trial),
overridden_by_field_trial(field_trial != nullptr) {}
-#endif
} // namespace base
diff --git a/base/feature_list.h b/base/feature_list.h
index 5dcd336..621017a 100644
--- a/base/feature_list.h
+++ b/base/feature_list.h
@@ -116,12 +116,10 @@
void InitializeFromCommandLine(const std::string& enable_features,
const std::string& disable_features);
-#if !defined(STARBOARD)
// Initializes feature overrides through the field trial allocator, which
// we're using to store the feature names, their override state, and the name
// of the associated field trial.
void InitializeFromSharedMemory(PersistentMemoryAllocator* allocator);
-#endif // !defined(STARBOARD)
// Specifies whether a feature override enables or disables the feature.
enum OverrideState {
@@ -135,7 +133,6 @@
bool IsFeatureOverriddenFromCommandLine(const std::string& feature_name,
OverrideState state) const;
-#if !defined(STARBOARD)
// Associates a field trial for reporting purposes corresponding to the
// command-line setting the feature state to |for_overridden_state|. The trial
// will be activated when the state of the feature is first queried. This
@@ -156,9 +153,10 @@
OverrideState override_state,
FieldTrial* field_trial);
+#if !defined(STARBBOARD)
// Loops through feature overrides and serializes them all into |allocator|.
void AddFeaturesToAllocator(PersistentMemoryAllocator* allocator);
-#endif // !defined(STARBOARD)
+#endif
// Returns comma-separated lists of feature names (in the same format that is
// accepted by InitializeFromCommandLine()) corresponding to features that
@@ -182,11 +180,9 @@
// struct, which is checked in builds with DCHECKs enabled.
static bool IsEnabled(const Feature& feature);
-#if !defined(STARBOARD)
// Returns the field trial associated with the given |feature|. Must only be
// called after the singleton instance has been registered via SetInstance().
static FieldTrial* GetFieldTrial(const Feature& feature);
-#endif // !defined(STARBOARD)
// Splits a comma-separated string containing feature names into a vector. The
// resulting pieces point to parts of |input|.
@@ -233,7 +229,6 @@
// The overridden enable (on/off) state of the feature.
const OverrideState overridden_state;
-#if !defined(STARBOARD)
// An optional associated field trial, which will be activated when the
// state of the feature is queried for the first time. Weak pointer to the
// FieldTrial object that is owned by the FieldTrialList singleton.
@@ -246,14 +241,10 @@
const bool overridden_by_field_trial;
// TODO(asvitkine): Expand this as more support is added.
-
// Constructs an OverrideEntry for the given |overridden_state|. If
// |field_trial| is not null, it implies that |overridden_state| comes from
// the trial, so |overridden_by_field_trial| will be set to true.
OverrideEntry(OverrideState overridden_state, FieldTrial* field_trial);
-#else // !defined(STARBOARD)
- OverrideEntry(OverrideState overridden_state);
-#endif // !defined(STARBOARD)
};
// Finalizes the initialization state of the FeatureList, so that no further
@@ -266,13 +257,11 @@
// Requires the FeatureList to have already been fully initialized.
bool IsFeatureEnabled(const Feature& feature);
-#if !defined(STARBOARD)
// Returns the field trial associated with the given |feature|. This is
// invoked by the public FeatureList::GetFieldTrial() static function on the
// global singleton. Requires the FeatureList to have already been fully
// initialized.
base::FieldTrial* GetAssociatedFieldTrial(const Feature& feature);
-#endif // !defined(STARBOARD)
// For each feature name in comma-separated list of strings |feature_list|,
// registers an override with the specified |overridden_state|. Also, will
@@ -292,7 +281,7 @@
void RegisterOverride(StringPiece feature_name,
OverrideState overridden_state,
FieldTrial* field_trial);
-#else // !defined(STARBOARD)
+#else // !defined(STARBOARD)
void RegisterOverride(StringPiece feature_name,
OverrideState overridden_state);
#endif // !defined(STARBOARD)
diff --git a/base/files/memory_mapped_file.h b/base/files/memory_mapped_file.h
index 13f2e58..1797576 100644
--- a/base/files/memory_mapped_file.h
+++ b/base/files/memory_mapped_file.h
@@ -16,8 +16,6 @@
#include "starboard/types.h"
#endif
-#if !defined(STARBOARD)
-
namespace base {
class FilePath;
@@ -136,6 +134,4 @@
} // namespace base
-#endif // !defined(STARBOARD)
-
#endif // BASE_FILES_MEMORY_MAPPED_FILE_H_
diff --git a/base/memory/shared_memory_handle.cc b/base/memory/shared_memory_handle.cc
index 085bde4..00775d4 100644
--- a/base/memory/shared_memory_handle.cc
+++ b/base/memory/shared_memory_handle.cc
@@ -6,6 +6,16 @@
namespace base {
+// As we don't support shared memory in Cobalt, we use the stub impl of
+// SharedMemoryHandle, which needs a default ctor to make the compiler happy.
+#if defined(STARBOARD)
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+bool SharedMemoryHandle::IsValid() const {
+ return false;
+}
+#endif
+
SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle) =
default;
diff --git a/base/memory/shared_memory_handle.h b/base/memory/shared_memory_handle.h
index 8329829..4354617 100644
--- a/base/memory/shared_memory_handle.h
+++ b/base/memory/shared_memory_handle.h
@@ -5,8 +5,6 @@
#ifndef BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
#define BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
-// Starboard doesn't curretly support multiple processes or shared memory.
-#if !defined(STARBOARD)
#include "base/unguessable_token.h"
#include "build/build_config.h"
@@ -240,5 +238,4 @@
} // namespace base
-#endif // !defined(STARBOARD)
#endif // BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index 3130afd..eaaec1b 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -4,8 +4,6 @@
#include "base/metrics/field_trial.h"
-#if !defined(STARBOARD)
-
#include <algorithm>
#include <utility>
@@ -54,7 +52,8 @@
// This is safe from race conditions because MakeIterable is a release operation
// and GetNextOfType is an acquire operation, so memory writes before
// MakeIterable happen before memory reads after GetNextOfType.
-#if defined(OS_FUCHSIA) // TODO(752368): Not yet supported on Fuchsia.
+// TODO(752368): Not yet supported on Fuchsia.
+#if defined(OS_FUCHSIA) || defined(STARBOARD)
const bool kUseSharedMemoryForFieldTrials = false;
#else
const bool kUseSharedMemoryForFieldTrials = true;
@@ -218,7 +217,7 @@
#endif
}
-#if !defined(OS_NACL)
+#if !defined(OS_NACL) && !defined(STARBOARD)
// Returns whether the operation succeeded.
bool DeserializeGUIDFromStringPieces(base::StringPiece first,
base::StringPiece second,
@@ -841,7 +840,7 @@
result);
DCHECK(result);
}
-#elif defined(OS_POSIX) && !defined(OS_NACL)
+#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(STARBOARD)
// On POSIX, we check if the handle is valid by seeing if the browser process
// sent over the switch (we don't care about the value). Invalid handles
// occur in some browser tests which don't initialize the allocator.
@@ -895,7 +894,7 @@
handles->push_back(global_->readonly_allocator_handle_.GetHandle());
}
}
-#elif defined(OS_FUCHSIA)
+#elif defined(OS_FUCHSIA) || defined(STARBOARD)
// TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
#elif defined(OS_POSIX) && !defined(OS_NACL)
// static
@@ -1218,6 +1217,9 @@
ss << uintptr_handle << ",";
#elif defined(OS_FUCHSIA)
ss << shm.GetHandle() << ",";
+#elif defined(STARBOARD)
+ ss << "unsupported"
+ << ",";
#elif !defined(OS_POSIX)
#error Unsupported OS
#endif
@@ -1303,7 +1305,7 @@
return false;
return FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm);
}
-#elif defined(OS_POSIX) && !defined(OS_NACL)
+#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(STARBOARD)
// static
bool FieldTrialList::CreateTrialsFromDescriptor(
int fd_key,
@@ -1329,6 +1331,7 @@
}
#endif // defined(OS_POSIX) && !defined(OS_NACL)
+#if !defined(STARBOARD)
// static
bool FieldTrialList::CreateTrialsFromSharedMemoryHandle(
SharedMemoryHandle shm_handle) {
@@ -1372,9 +1375,13 @@
}
return true;
}
+#endif
// static
void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
+#if defined(STARBOARD)
+ return;
+#else
if (!global_)
return;
AutoLock auto_lock(global_->lock_);
@@ -1414,6 +1421,7 @@
global_->readonly_allocator_handle_ = GetSharedMemoryReadOnlyHandle(
global_->field_trial_allocator_->shared_memory());
#endif
+#endif
}
// static
@@ -1530,4 +1538,3 @@
}
} // namespace base
-#endif // !defined(STARBOARD)
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
index 1460410..3d877f2 100644
--- a/base/metrics/field_trial.h
+++ b/base/metrics/field_trial.h
@@ -80,20 +80,6 @@
#include "build/build_config.h"
#include "starboard/types.h"
-#if defined(STARBOARD)
-namespace base {
-
-class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {};
-
-class BASE_EXPORT FieldTrialList {
- public:
- static std::string FindFullName(const std::string& trial_name) {
- return std::string();
- }
-};
-} // namespace base
-#else
-
namespace base {
class FieldTrialList;
@@ -102,8 +88,14 @@
public:
typedef int Probability; // Probability type for being selected in a trial.
+#if !defined(STARBOARD)
// TODO(665129): Make private again after crash has been resolved.
typedef SharedPersistentMemoryAllocator::Reference FieldTrialRef;
+#else
+ // In Cobalt, we don't export SharedPersistentMemoryAllocator, so we use the
+ // underlying type "uint32_t" directly here.
+ typedef uint32_t FieldTrialRef;
+#endif
// Specifies the persistence of the field trial group choice.
enum RandomizationType {
@@ -404,7 +396,12 @@
// the entire life time of the process.
class BASE_EXPORT FieldTrialList {
public:
+#if !defined(STARBOARD)
typedef SharedPersistentMemoryAllocator FieldTrialAllocator;
+#else
+ // In Cobalt, we don't import any shared memory constructs.
+ typedef LocalPersistentMemoryAllocator FieldTrialAllocator;
+#endif
// Type for function pointer passed to |AllParamsToString| used to escape
// special characters from |input|.
@@ -598,7 +595,7 @@
// list of handles to be inherited.
static void AppendFieldTrialHandleIfNeeded(
base::HandlesToInheritVector* handles);
-#elif defined(OS_FUCHSIA)
+#elif defined(OS_FUCHSIA) || defined(STARBOARD)
// TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
#elif defined(OS_POSIX) && !defined(OS_NACL)
// On POSIX, we also need to explicitly pass down this file descriptor that
@@ -717,7 +714,7 @@
// Returns true on success, false on failure.
// |switch_value| also contains the serialized GUID.
static bool CreateTrialsFromSwitchValue(const std::string& switch_value);
-#elif defined(OS_POSIX) && !defined(OS_NACL)
+#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(STARBOARD)
// On POSIX systems that use the zygote, we look up the correct fd that backs
// the shared memory segment containing the field trials by looking it up via
// an fd key in GlobalDescriptors. Returns true on success, false on failure.
@@ -726,8 +723,9 @@
const std::string& switch_value);
#endif
- // Takes an unmapped SharedMemoryHandle, creates a SharedMemory object from it
- // and maps it with the correct size.
+#if !defined(STARBOARD)
+ // Takes an unmapped SharedMemoryHandle, creates a SharedMemory object from
+ // it and maps it with the correct size.
static bool CreateTrialsFromSharedMemoryHandle(SharedMemoryHandle shm_handle);
// Expects a mapped piece of shared memory |shm| that was created from the
@@ -737,6 +735,7 @@
// successful and false otherwise.
static bool CreateTrialsFromSharedMemory(
std::unique_ptr<base::SharedMemory> shm);
+#endif
// Instantiate the field trial allocator, add all existing field trials to it,
// and duplicates its handle to a read-only handle, which gets stored in
@@ -813,5 +812,4 @@
} // namespace base
-#endif // #if defined(STARBOARD)
#endif // BASE_METRICS_FIELD_TRIAL_H_
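
A small usage sketch (illustrative, not taken from the patch) of the lookup path that keeps working on Starboard now that the real FieldTrial/FieldTrialList declarations are compiled in rather than the old Cobalt stub. FindFullName() is the same API the removed stub exposed; it returns the registered group name, or an empty string for an unknown trial.

#include <string>

#include "base/metrics/field_trial.h"

// Returns the chosen group for |trial_name|, or "" if no such trial exists.
std::string LookUpTrialGroup(const std::string& trial_name) {
  return base::FieldTrialList::FindFullName(trial_name);
}
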
diff --git a/base/metrics/field_trial_params.h b/base/metrics/field_trial_params.h
index 0548945..8682226 100644
--- a/base/metrics/field_trial_params.h
+++ b/base/metrics/field_trial_params.h
@@ -5,8 +5,6 @@
#ifndef BASE_METRICS_FIELD_TRIAL_PARAMS_H_
#define BASE_METRICS_FIELD_TRIAL_PARAMS_H_
-#if !defined(STARBOARD)
-
#include <map>
#include <string>
@@ -257,6 +255,4 @@
} // namespace base
-#endif // !defined(STARBOARD)
-
#endif // BASE_METRICS_FIELD_TRIAL_PARAMS_H_
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index e30e10e..24ac0c0 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -1128,6 +1128,11 @@
int result = ::msync(const_cast<void*>(data()), length,
MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
DCHECK_NE(EINVAL, result);
+#elif defined(STARBOARD)
+ // TODO(b/283278127): This won't work for platforms where
+ // SB_CAN_MAP_EXECUTABLE_MEMORY = 0. That's nxswitch, tvos, and playstation.
+ // Figure out how to make this work for all platforms.
+ SbMemoryFlush(const_cast<void*>(data()), length);
#else
#error Unsupported OS.
#endif
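
One possible shape for the TODO above, sketched under the assumption that the SB_CAN(MAP_EXECUTABLE_MEMORY) configuration check and SbMemoryFlush(void*, int64_t) are available on the target Starboard version; the helper name is hypothetical and not part of the patch.

#include "starboard/configuration.h"
#include "starboard/memory.h"

// Hypothetical helper: flush the mapped region where the platform supports
// it, and degrade to a no-op elsewhere until a portable primitive exists
// (b/283278127).
static void FlushIfSupported(void* address, int64_t size_bytes) {
#if SB_CAN(MAP_EXECUTABLE_MEMORY)
  SbMemoryFlush(address, size_bytes);
#else
  (void)address;
  (void)size_bytes;
#endif
}
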
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index ec85c3a..bcc9736 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -21,7 +21,9 @@
class HistogramBase;
class MemoryMappedFile;
+#if !defined(STARBOARD)
class SharedMemory;
+#endif
// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
@@ -737,7 +739,7 @@
#endif // !defined(STARBOARD)
// NACL doesn't support any kind of file access in build.
-#if !defined(OS_NACL) && !defined(STARBOARD)
+#if !defined(OS_NACL) || !defined(STARBOARD)
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
diff --git a/base/optional_unittest.cc b/base/optional_unittest.cc
index d504d2b..9fce853 100644
--- a/base/optional_unittest.cc
+++ b/base/optional_unittest.cc
@@ -1921,12 +1921,12 @@
}
{
- Optional<std::string> o = make_optional(std::string("foo"));
+ Optional<std::string> o = base::make_optional(std::string("foo"));
EXPECT_TRUE(o);
EXPECT_EQ("foo", *o);
std::string value = "bar";
- o = make_optional(std::move(value));
+ o = base::make_optional(std::move(value));
EXPECT_TRUE(o);
EXPECT_EQ(std::string("bar"), *o);
}
@@ -1968,7 +1968,7 @@
EXPECT_EQ("123", *str1);
auto str2 =
- make_optional<std::string>({'a', 'b', 'c'}, std::allocator<char>());
+ base::make_optional<std::string>({'a', 'b', 'c'}, std::allocator<char>());
EXPECT_EQ("abc", *str2);
}
}
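
The patch does not spell out why the explicit base:: qualification is needed; a plausible explanation, sketched below with stand-in names (namespace demo substitutes for base), is that argument-dependent lookup on a std:: argument also finds std::make_optional in C++17, making the unqualified call ambiguous.

#include <optional>
#include <string>
#include <type_traits>
#include <utility>

namespace demo {  // Stand-in for base; demo::make_optional mirrors its shape.

template <typename T>
std::optional<std::decay_t<T>> make_optional(T&& value) {
  return std::optional<std::decay_t<T>>(std::forward<T>(value));
}

void QualifiedCall() {
  // make_optional(std::string("foo"));  // Ambiguous: ADL also finds
  //                                     // std::make_optional.
  auto o = demo::make_optional(std::string("foo"));  // OK: qualified call.
  (void)o;
}

}  // namespace demo
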
diff --git a/base/sequenced_task_runner.cc b/base/sequenced_task_runner.cc
index 86771c6..0e12121 100644
--- a/base/sequenced_task_runner.cc
+++ b/base/sequenced_task_runner.cc
@@ -3,12 +3,48 @@
// found in the LICENSE file.
#include "base/sequenced_task_runner.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/trace_event/trace_event.h"
#include <utility>
#include "base/bind.h"
namespace base {
+#if defined(STARBOARD)
+namespace {
+
+// Runs the given task, and then signals the given WaitableEvent.
+void RunAndSignal(const base::Closure& task, base::WaitableEvent* event) {
+ TRACE_EVENT0("task", "RunAndSignal");
+ task.Run();
+ event->Signal();
+}
+} // namespace
+
+void SequencedTaskRunner::PostBlockingTask(const base::Location& from_here,
+ const base::Closure& task) {
+ TRACE_EVENT0("task", "MessageLoop::PostBlockingTask");
+ DCHECK(!RunsTasksInCurrentSequence())
+ << "PostBlockingTask can't be called from the MessageLoop's own thread. "
+ << from_here.ToString();
+ DCHECK(!task.is_null()) << from_here.ToString();
+
+ base::WaitableEvent task_finished(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ bool task_may_run = PostTask(from_here,
+ base::Bind(&RunAndSignal, task, base::Unretained(&task_finished)));
+ DCHECK(task_may_run)
+ << "Task that will never run posted with PostBlockingTask.";
+
+ if (task_may_run) {
+ // Wait for the task to complete before proceeding.
+ task_finished.Wait();
+ }
+}
+#endif
bool SequencedTaskRunner::PostNonNestableTask(const Location& from_here,
OnceClosure task) {
diff --git a/base/sequenced_task_runner.h b/base/sequenced_task_runner.h
index 8138414..c26c3a9 100644
--- a/base/sequenced_task_runner.h
+++ b/base/sequenced_task_runner.h
@@ -8,6 +8,7 @@
#include <memory>
#include "base/base_export.h"
+#include "base/bind.h"
#include "base/callback.h"
#include "base/sequenced_task_runner_helpers.h"
#include "base/task_runner.h"
@@ -140,6 +141,25 @@
object);
}
+#if defined(STARBOARD)
+ // Like PostTask, but blocks until the posted task completes. Does not block
+ // if the task could not be posted.
+ virtual void PostBlockingTask(const base::Location& from_here,
+ const Closure& task);
+
+ // Adds a fence at the end of this MessageLoop's task queue, and then blocks
+ // until it has been reached. It is forbidden to call this method from the
+ // thread of the MessageLoop being posted to. One should exercise extreme
+ // caution when using this, as blocking between MessageLoops can cause
+ // deadlocks and is contraindicated in the Actor model of multiprogramming.
+ void WaitForFence() {
+ struct Fence {
+ static void Task() {}
+ };
+ PostBlockingTask(FROM_HERE, base::Bind(&Fence::Task));
+ }
+#endif
+
protected:
~SequencedTaskRunner() override = default;
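
An illustrative call site (assumed, not part of the patch) for the two Starboard-only members declared above; per the DCHECK in the implementation, neither call may be made from the target sequence itself.

#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner.h"

// Hypothetical helper: run |work| on |task_runner| and wait for it to finish,
// then block until everything else already queued behind it has drained.
void RunThenDrain(scoped_refptr<base::SequencedTaskRunner> task_runner,
                  const base::Closure& work) {
  task_runner->PostBlockingTask(FROM_HERE, work);  // Blocks until |work| runs.
  task_runner->WaitForFence();                     // Blocks until queue drains.
}
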
diff --git a/base/single_thread_task_runner.h b/base/single_thread_task_runner.h
index 74741d3..f8ba5a1 100644
--- a/base/single_thread_task_runner.h
+++ b/base/single_thread_task_runner.h
@@ -32,7 +32,7 @@
// Like PostTask, but blocks until the posted task completes. Returns false
// and does not block if task was not posted.
virtual void PostBlockingTask(const base::Location& from_here,
- const Closure& task);
+ const Closure& task) override;
// Adds a fence at the end of this MessageLoop's task queue, and then blocks
// until it has been reached. It is forbidden to call this method from the
diff --git a/base/strings/string_number_conversions.cc b/base/strings/string_number_conversions.cc
index 9995ac8..5d9c326 100644
--- a/base/strings/string_number_conversions.cc
+++ b/base/strings/string_number_conversions.cc
@@ -17,7 +17,6 @@
#include "base/scoped_clear_last_error.h"
#include "base/strings/utf_string_conversions.h"
#include "base/third_party/dmg_fp/dmg_fp.h"
-#include "starboard/character.h"
#include "starboard/common/string.h"
#include "starboard/types.h"
diff --git a/base/third_party/symbolize/BUILD.gn b/base/third_party/symbolize/BUILD.gn
index 0dc7c2f..63febc8 100644
--- a/base/third_party/symbolize/BUILD.gn
+++ b/base/third_party/symbolize/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -13,6 +13,13 @@
print_unsymbolized_stack_traces = is_asan || is_lsan || is_msan || is_tsan
}
+config("symbolize_config") {
+ defines = [
+ # Only built for Linux and ChromeOS so no special export magic needed.
+ "GLOG_EXPORT=",
+ ]
+}
+
static_library("symbolize") {
visibility = [ "//base/*" ]
sources = [
@@ -27,10 +34,13 @@
]
defines = []
+
if (print_unsymbolized_stack_traces) {
defines += [ "PRINT_UNSYMBOLIZED_STACK_TRACES" ]
}
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
+
+ public_configs = [ ":symbolize_config" ]
}
diff --git a/base/third_party/symbolize/README.chromium b/base/third_party/symbolize/README.chromium
index c8f50e8..d797349 100644
--- a/base/third_party/symbolize/README.chromium
+++ b/base/third_party/symbolize/README.chromium
@@ -1,23 +1,37 @@
Name: google-glog's symbolization library
-URL: http://code.google.com/p/google-glog/
+URL: https://github.com/google/glog
License: BSD
-The following files are copied AS-IS from:
-http://code.google.com/p/google-glog/source/browse/#svn/trunk/src (r76)
+The following files were copied from:
+https://github.com/google/glog/tree/b70ea80433c2a8f20b832be97b90f1f82b0d29e9
-- demangle.cc
-- demangle.h
-- symbolize.h
-
-The following files are minimal stubs created for use in Chromium:
-
-- config.h
- glog/logging.h
- glog/raw_logging.h
+- demangle.cc
+- demangle.h
+- symbolize.cc
+- symbolize.h
- utilities.h
-The following file has been copied from:
-http://code.google.com/p/google-glog/source/browse/#svn/trunk/src (r76)
- and then trivially modified to compile in C++11.
+config.h is auto-generated using the glog build and then trimmed down to only
+the macros used in the files above.
-- symbolize.cc
+Local modifications:
+- 001-fix-up-includes.patch: remove includes for "base/mutex.h" and change
+ logging headers to be included as user headers rather than system headers.
+- 002-minimal-logging.patch: remove everything except a RAW_LOG() macro that
+ does nothing.
+- 003-minimal-utilities.patch: remove everything except a macro for wrapping
+ the noninline compiler attribute.
+- 004-add-missing-symbolize-header.patch: add an include for symbolize.h to
+ symbolize.cc. This patch should be upstreamed.
+- 005-expose-file-helpers.patch: expose helpers for working with symbol files.
+- 006-use-sandbox-hook-for-open-object-file.patch: use the sandbox hook for
+ the exposed helper for opening object files. This patch should be upstreamed.
+- 007-sys-types-h.patch: include <sys/types.h> to get ssize_t on non-glibc
+ platforms.
+- 008-include-cstdlib.patch: include <cstdlib> for abort() rather than relying
+ on transitive includes.
+- 009-clone-absl-demangle.patch: Clone the demangling implementation from
+ abseil-cpp, which is itself a fork of https://github.com/google/glog/.
+- 010-clang-format.patch: format the source files using Chrome formatting rules.
diff --git a/base/third_party/symbolize/config.h b/base/third_party/symbolize/config.h
index 945f5a6..78654d9 100644
--- a/base/third_party/symbolize/config.h
+++ b/base/third_party/symbolize/config.h
@@ -1,7 +1,39 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+#ifndef GLOG_CONFIG_H
+#define GLOG_CONFIG_H
+/* Namespace for Google classes */
#define GOOGLE_NAMESPACE google
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H
+
+/* define if your compiler has __attribute__ */
+#define HAVE___ATTRIBUTE__
+
+/* define if symbolize support is available */
+#define HAVE_SYMBOLIZE
+
+/* The size of `void *', as computed by sizeof. */
+#if defined(__LP64__)
+#define SIZEOF_VOID_P 8
+#else
+#define SIZEOF_VOID_P 4
+#endif
+
+#ifdef GLOG_BAZEL_BUILD
+
+/* TODO(rodrigoq): remove this workaround once bazel#3979 is resolved:
+ * https://github.com/bazelbuild/bazel/issues/3979 */
+#define _START_GOOGLE_NAMESPACE_ namespace GOOGLE_NAMESPACE {
#define _END_GOOGLE_NAMESPACE_ }
+
+#else
+
+/* Stops putting the code inside the Google namespace */
+#define _END_GOOGLE_NAMESPACE_ }
+
+/* Puts following code inside the Google namespace */
#define _START_GOOGLE_NAMESPACE_ namespace google {
+#endif
+
+#endif // GLOG_CONFIG_H
diff --git a/base/third_party/symbolize/demangle.cc b/base/third_party/symbolize/demangle.cc
index db11e5c..8db75f0 100644
--- a/base/third_party/symbolize/demangle.cc
+++ b/base/third_party/symbolize/demangle.cc
@@ -28,132 +28,221 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: Satoru Takabayashi
-
-// Note for Cobalt: Cobalt Starboard depends on the old version of Symbolize so
-// this file is from m27 Chromium. There are no Cobalt-introduced changes in
-// this file.
+//
+// For reference check out:
+// http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
+//
+// Note that we only have partial C++0x support yet.
#include "demangle.h"
-#include <stdio.h> // for NULL
+
+#if defined(GLOG_OS_WINDOWS)
+#include <dbghelp.h>
+#else
+#include <cstdint>
+#include <cstdio>
+#include <limits>
+#endif
_START_GOOGLE_NAMESPACE_
+#if !defined(GLOG_OS_WINDOWS)
typedef struct {
const char *abbrev;
const char *real_name;
+ // Number of arguments in <expression> context, or 0 if disallowed.
+ int arity;
} AbbrevPair;
// List of operators from Itanium C++ ABI.
static const AbbrevPair kOperatorList[] = {
- { "nw", "new" },
- { "na", "new[]" },
- { "dl", "delete" },
- { "da", "delete[]" },
- { "ps", "+" },
- { "ng", "-" },
- { "ad", "&" },
- { "de", "*" },
- { "co", "~" },
- { "pl", "+" },
- { "mi", "-" },
- { "ml", "*" },
- { "dv", "/" },
- { "rm", "%" },
- { "an", "&" },
- { "or", "|" },
- { "eo", "^" },
- { "aS", "=" },
- { "pL", "+=" },
- { "mI", "-=" },
- { "mL", "*=" },
- { "dV", "/=" },
- { "rM", "%=" },
- { "aN", "&=" },
- { "oR", "|=" },
- { "eO", "^=" },
- { "ls", "<<" },
- { "rs", ">>" },
- { "lS", "<<=" },
- { "rS", ">>=" },
- { "eq", "==" },
- { "ne", "!=" },
- { "lt", "<" },
- { "gt", ">" },
- { "le", "<=" },
- { "ge", ">=" },
- { "nt", "!" },
- { "aa", "&&" },
- { "oo", "||" },
- { "pp", "++" },
- { "mm", "--" },
- { "cm", "," },
- { "pm", "->*" },
- { "pt", "->" },
- { "cl", "()" },
- { "ix", "[]" },
- { "qu", "?" },
- { "st", "sizeof" },
- { "sz", "sizeof" },
- { NULL, NULL },
+ // New has special syntax (not currently supported).
+ {"nw", "new", 0},
+ {"na", "new[]", 0},
+
+ // Works except that the 'gs' prefix is not supported.
+ {"dl", "delete", 1},
+ {"da", "delete[]", 1},
+
+ {"ps", "+", 1}, // "positive"
+ {"ng", "-", 1}, // "negative"
+ {"ad", "&", 1}, // "address-of"
+ {"de", "*", 1}, // "dereference"
+ {"co", "~", 1},
+
+ {"pl", "+", 2},
+ {"mi", "-", 2},
+ {"ml", "*", 2},
+ {"dv", "/", 2},
+ {"rm", "%", 2},
+ {"an", "&", 2},
+ {"or", "|", 2},
+ {"eo", "^", 2},
+ {"aS", "=", 2},
+ {"pL", "+=", 2},
+ {"mI", "-=", 2},
+ {"mL", "*=", 2},
+ {"dV", "/=", 2},
+ {"rM", "%=", 2},
+ {"aN", "&=", 2},
+ {"oR", "|=", 2},
+ {"eO", "^=", 2},
+ {"ls", "<<", 2},
+ {"rs", ">>", 2},
+ {"lS", "<<=", 2},
+ {"rS", ">>=", 2},
+ {"eq", "==", 2},
+ {"ne", "!=", 2},
+ {"lt", "<", 2},
+ {"gt", ">", 2},
+ {"le", "<=", 2},
+ {"ge", ">=", 2},
+ {"nt", "!", 1},
+ {"aa", "&&", 2},
+ {"oo", "||", 2},
+ {"pp", "++", 1},
+ {"mm", "--", 1},
+ {"cm", ",", 2},
+ {"pm", "->*", 2},
+ {"pt", "->", 0}, // Special syntax
+ {"cl", "()", 0}, // Special syntax
+ {"ix", "[]", 2},
+ {"qu", "?", 3},
+ {"st", "sizeof", 0}, // Special syntax
+ {"sz", "sizeof", 1}, // Not a real operator name, but used in expressions.
+ {nullptr, nullptr, 0},
};
// List of builtin types from Itanium C++ ABI.
+//
+// Invariant: only one- or two-character type abbreviations here.
static const AbbrevPair kBuiltinTypeList[] = {
- { "v", "void" },
- { "w", "wchar_t" },
- { "b", "bool" },
- { "c", "char" },
- { "a", "signed char" },
- { "h", "unsigned char" },
- { "s", "short" },
- { "t", "unsigned short" },
- { "i", "int" },
- { "j", "unsigned int" },
- { "l", "long" },
- { "m", "unsigned long" },
- { "x", "long long" },
- { "y", "unsigned long long" },
- { "n", "__int128" },
- { "o", "unsigned __int128" },
- { "f", "float" },
- { "d", "double" },
- { "e", "long double" },
- { "g", "__float128" },
- { "z", "ellipsis" },
- { NULL, NULL }
+ {"v", "void", 0},
+ {"w", "wchar_t", 0},
+ {"b", "bool", 0},
+ {"c", "char", 0},
+ {"a", "signed char", 0},
+ {"h", "unsigned char", 0},
+ {"s", "short", 0},
+ {"t", "unsigned short", 0},
+ {"i", "int", 0},
+ {"j", "unsigned int", 0},
+ {"l", "long", 0},
+ {"m", "unsigned long", 0},
+ {"x", "long long", 0},
+ {"y", "unsigned long long", 0},
+ {"n", "__int128", 0},
+ {"o", "unsigned __int128", 0},
+ {"f", "float", 0},
+ {"d", "double", 0},
+ {"e", "long double", 0},
+ {"g", "__float128", 0},
+ {"z", "ellipsis", 0},
+
+ {"De", "decimal128", 0}, // IEEE 754r decimal floating point (128 bits)
+ {"Dd", "decimal64", 0}, // IEEE 754r decimal floating point (64 bits)
+ {"Dc", "decltype(auto)", 0},
+ {"Da", "auto", 0},
+ {"Dn", "std::nullptr_t", 0}, // i.e., decltype(nullptr)
+ {"Df", "decimal32", 0}, // IEEE 754r decimal floating point (32 bits)
+ {"Di", "char32_t", 0},
+ {"Du", "char8_t", 0},
+ {"Ds", "char16_t", 0},
+ {"Dh", "float16", 0}, // IEEE 754r half-precision float (16 bits)
+ {nullptr, nullptr, 0},
};
// List of substitutions Itanium C++ ABI.
static const AbbrevPair kSubstitutionList[] = {
- { "St", "" },
- { "Sa", "allocator" },
- { "Sb", "basic_string" },
- // std::basic_string<char, std::char_traits<char>,std::allocator<char> >
- { "Ss", "string"},
- // std::basic_istream<char, std::char_traits<char> >
- { "Si", "istream" },
- // std::basic_ostream<char, std::char_traits<char> >
- { "So", "ostream" },
- // std::basic_iostream<char, std::char_traits<char> >
- { "Sd", "iostream" },
- { NULL, NULL }
+ {"St", "", 0},
+ {"Sa", "allocator", 0},
+ {"Sb", "basic_string", 0},
+ // std::basic_string<char, std::char_traits<char>,std::allocator<char> >
+ {"Ss", "string", 0},
+ // std::basic_istream<char, std::char_traits<char> >
+ {"Si", "istream", 0},
+ // std::basic_ostream<char, std::char_traits<char> >
+ {"So", "ostream", 0},
+ // std::basic_iostream<char, std::char_traits<char> >
+ {"Sd", "iostream", 0},
+ {nullptr, nullptr, 0},
};
-// State needed for demangling.
+// State needed for demangling. This struct is copied in almost every stack
+// frame, so every byte counts.
typedef struct {
- const char *mangled_cur; // Cursor of mangled name.
- const char* mangled_end; // End of mangled name.
- char *out_cur; // Cursor of output string.
- const char *out_begin; // Beginning of output string.
- const char *out_end; // End of output string.
- const char *prev_name; // For constructors/destructors.
- int prev_name_length; // For constructors/destructors.
- int nest_level; // For nested names.
- int number; // Remember the previous number.
- bool append; // Append flag.
- bool overflowed; // True if output gets overflowed.
+ int mangled_idx; // Cursor of mangled name.
+ int out_cur_idx; // Cursor of output string.
+ int prev_name_idx; // For constructors/destructors.
+ unsigned int prev_name_length : 16; // For constructors/destructors.
+ signed int nest_level : 15; // For nested names.
+ unsigned int append : 1; // Append flag.
+ // Note: for some reason MSVC can't pack "bool append : 1" into the same int
+ // with the above two fields, so we use an int instead. Amusingly it can pack
+ // "signed bool" as expected, but relying on that to continue to be a legal
+ // type seems ill-advised (as it's illegal in at least clang).
+} ParseState;
+
+static_assert(sizeof(ParseState) == 4 * sizeof(int),
+ "unexpected size of ParseState");
+
+// One-off state for demangling that's not subject to backtracking -- either
+// constant data, data that's intentionally immune to backtracking (steps), or
+// data that would never be changed by backtracking anyway (recursion_depth).
+//
+// Only one copy of this exists for each call to Demangle, so the size of this
+// struct is nearly inconsequential.
+typedef struct {
+ const char* mangled_begin; // Beginning of input string.
+ char* out; // Beginning of output string.
+ int out_end_idx; // One past last allowed output character.
+ int recursion_depth; // For stack exhaustion prevention.
+ int steps; // Cap how much work we'll do, regardless of depth.
+ ParseState parse_state; // Backtrackable state copied for most frames.
} State;
+namespace {
+// Prevent deep recursion / stack exhaustion.
+// Also prevent unbounded handling of complex inputs.
+class ComplexityGuard {
+ public:
+ explicit ComplexityGuard(State* state) : state_(state) {
+ ++state->recursion_depth;
+ ++state->steps;
+ }
+ ~ComplexityGuard() { --state_->recursion_depth; }
+
+ // 256 levels of recursion seems like a reasonable upper limit on depth.
+ // 128 is not enough to demangle synthetic tests from demangle_unittest.txt:
+ // "_ZaaZZZZ..." and "_ZaaZcvZcvZ..."
+ static constexpr int kRecursionDepthLimit = 256;
+
+ // We're trying to pick a charitable upper-limit on how many parse steps are
+ // necessary to handle something that a human could actually make use of.
+ // This is mostly in place as a bound on how much work we'll do if we are
+ // asked to demangle a mangled name from an untrusted source, so it should be
+ // much larger than the largest expected symbol, but much smaller than the
+ // amount of work we can do in, e.g., a second.
+ //
+ // Some real-world symbols from an arbitrary binary started failing between
+ // 2^12 and 2^13, so we multiply the latter by an extra factor of 16 to set
+ // the limit.
+ //
+ // Spending one second on 2^17 parse steps would require each step to take
+ // 7.6us, or ~30000 clock cycles, so it's safe to say this can be done in
+ // under a second.
+ static constexpr int kParseStepsLimit = 1 << 17;
+
+ bool IsTooComplex() const {
+ return state_->recursion_depth > kRecursionDepthLimit ||
+ state_->steps > kParseStepsLimit;
+ }
+
+ private:
+ State* state_;
+};
+} // namespace
+
// We don't use strlen() in libc since it's not guaranteed to be async
// signal safe.
static size_t StrLen(const char *str) {
@@ -165,52 +254,73 @@
return len;
}
+// Returns true if "str" has at least "n" characters remaining.
+static bool AtLeastNumCharsRemaining(const char* str, size_t n) {
+ for (size_t i = 0; i < n; ++i) {
+ if (str[i] == '\0') {
+ return false;
+ }
+ }
+ return true;
+}
+
// Returns true if "str" has "prefix" as a prefix.
static bool StrPrefix(const char *str, const char *prefix) {
size_t i = 0;
- while (str[i] != '\0' && prefix[i] != '\0' &&
- str[i] == prefix[i]) {
+ while (str[i] != '\0' && prefix[i] != '\0' && str[i] == prefix[i]) {
++i;
}
return prefix[i] == '\0'; // Consumed everything in "prefix".
}
-static void InitState(State *state, const char *mangled,
- char *out, int out_size) {
- state->mangled_cur = mangled;
- state->mangled_end = mangled + StrLen(mangled);
- state->out_cur = out;
- state->out_begin = out;
- state->out_end = out + out_size;
- state->prev_name = NULL;
- state->prev_name_length = -1;
- state->nest_level = -1;
- state->number = -1;
- state->append = true;
- state->overflowed = false;
+static void InitState(State* state,
+ const char* mangled,
+ char* out,
+ size_t out_size) {
+ state->mangled_begin = mangled;
+ state->out = out;
+ state->out_end_idx = static_cast<int>(out_size);
+ state->recursion_depth = 0;
+ state->steps = 0;
+
+ state->parse_state.mangled_idx = 0;
+ state->parse_state.out_cur_idx = 0;
+ state->parse_state.prev_name_idx = 0;
+ state->parse_state.prev_name_length = 0;
+ state->parse_state.nest_level = -1;
+ state->parse_state.append = true;
}
-// Calculates the remaining length of the mangled name.
-static int RemainingLength(State* state) {
- return state->mangled_end - state->mangled_cur;
+static inline const char* RemainingInput(State* state) {
+ return &state->mangled_begin[state->parse_state.mangled_idx];
}
-// Returns true and advances "mangled_cur" if we find "c" at
-// "mangled_cur" position.
-static bool ParseChar(State* state, const char c) {
- if (RemainingLength(state) >= 1 && *state->mangled_cur == c) {
- ++state->mangled_cur;
+// Returns true and advances "mangled_idx" if we find "one_char_token"
+// at "mangled_idx" position. It is assumed that "one_char_token" does
+// not contain '\0'.
+static bool ParseOneCharToken(State *state, const char one_char_token) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ if (RemainingInput(state)[0] == one_char_token) {
+ ++state->parse_state.mangled_idx;
return true;
}
return false;
}
-// Returns true and advances "mangled_cur" if we find "two_chars" at
-// "mangled_cur" position.
-static bool ParseTwoChar(State* state, const char* two_chars) {
- if (RemainingLength(state) >= 2 && state->mangled_cur[0] == two_chars[0] &&
- state->mangled_cur[1] == two_chars[1]) {
- state->mangled_cur += 2;
+// Returns true and advances "mangled_cur" if we find "two_char_token"
+// at "mangled_cur" position. It is assumed that "two_char_token" does
+// not contain '\0'.
+static bool ParseTwoCharToken(State *state, const char *two_char_token) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ if (RemainingInput(state)[0] == two_char_token[0] &&
+ RemainingInput(state)[1] == two_char_token[1]) {
+ state->parse_state.mangled_idx += 2;
return true;
}
return false;
@@ -219,21 +329,36 @@
// Returns true and advances "mangled_cur" if we find any character in
// "char_class" at "mangled_cur" position.
static bool ParseCharClass(State *state, const char *char_class) {
- if (state->mangled_cur == state->mangled_end) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ if (RemainingInput(state)[0] == '\0') {
return false;
}
const char *p = char_class;
for (; *p != '\0'; ++p) {
- if (*state->mangled_cur == *p) {
- state->mangled_cur += 1;
+ if (RemainingInput(state)[0] == *p) {
+ ++state->parse_state.mangled_idx;
return true;
}
}
return false;
}
+static bool ParseDigit(State* state, int* digit) {
+ char c = RemainingInput(state)[0];
+ if (ParseCharClass(state, "0123456789")) {
+ if (digit != nullptr) {
+ *digit = c - '0';
+ }
+ return true;
+ }
+ return false;
+}
+
// This function is used for handling an optional non-terminal.
-static bool Optional(bool status) {
+static bool Optional(bool /*status*/) {
return true;
}
@@ -248,22 +373,33 @@
return false;
}
-// Append "str" at "out_cur". If there is an overflow, "overflowed"
-// is set to true for later use. The output string is ensured to
+// This function is used for handling <non-terminal>* syntax. The function
+// always returns true and must be followed by a termination token or a
+// terminating sequence not handled by parse_func (e.g.
+// ParseOneCharToken(state, 'E')).
+static bool ZeroOrMore(ParseFunc parse_func, State *state) {
+ while (parse_func(state)) {
+ }
+ return true;
+}
+
+// Append "str" at "out_cur_idx". If there is an overflow, out_cur_idx is
+// set to out_end_idx+1. The output string is ensured to
// always terminate with '\0' as long as there is no overflow.
-static void Append(State *state, const char * const str, const int length) {
- int i;
- for (i = 0; i < length; ++i) {
- if (state->out_cur + 1 < state->out_end) { // +1 for '\0'
- *state->out_cur = str[i];
- ++state->out_cur;
+static void Append(State* state, const char* const str, const size_t length) {
+ for (size_t i = 0; i < length; ++i) {
+ if (state->parse_state.out_cur_idx + 1 <
+ state->out_end_idx) { // +1 for '\0'
+ state->out[state->parse_state.out_cur_idx++] = str[i];
} else {
- state->overflowed = true;
+ // signal overflow
+ state->parse_state.out_cur_idx = state->out_end_idx + 1;
break;
}
}
- if (!state->overflowed) {
- *state->out_cur = '\0'; // Terminate it with '\0'
+ if (state->parse_state.out_cur_idx < state->out_end_idx) {
+ state->out[state->parse_state.out_cur_idx] =
+ '\0'; // Terminate it with '\0'
}
}
@@ -273,33 +409,100 @@
}
static bool IsAlpha(char c) {
- return ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'));
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+static bool IsDigit(char c) {
+ return c >= '0' && c <= '9';
+}
+
+// Returns true if "str" is a function clone suffix. These suffixes are used
+// by GCC 4.5.x and later versions (and our locally-modified version of GCC
+// 4.4.x) to indicate functions which have been cloned during optimization.
+// We treat any sequence (.<alpha>+.<digit>+)+ as a function clone suffix.
+// Additionally, '_' is allowed along with the alphanumeric sequence.
+static bool IsFunctionCloneSuffix(const char *str) {
+ size_t i = 0;
+ while (str[i] != '\0') {
+ bool parsed = false;
+ // Consume a single [.<alpha> | _]*[.<digit>]* sequence.
+ if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) {
+ parsed = true;
+ i += 2;
+ while (IsAlpha(str[i]) || str[i] == '_') {
+ ++i;
+ }
+ }
+ if (str[i] == '.' && IsDigit(str[i + 1])) {
+ parsed = true;
+ i += 2;
+ while (IsDigit(str[i])) {
+ ++i;
+ }
+ }
+ if (!parsed) {
+ return false;
+ }
+ }
+ return true; // Consumed everything in "str".
+}
+
+static bool EndsWith(State* state, const char chr) {
+ return state->parse_state.out_cur_idx > 0 &&
+ state->parse_state.out_cur_idx < state->out_end_idx &&
+ chr == state->out[state->parse_state.out_cur_idx - 1];
}
// Append "str" with some tweaks, iff "append" state is true.
-// Returns true so that it can be placed in "if" conditions.
-static void MaybeAppendWithLength(State *state, const char * const str,
- const int length) {
- if (state->append && length > 0) {
+static void MaybeAppendWithLength(State* state,
+ const char* const str,
+ const size_t length) {
+ if (state->parse_state.append && length > 0) {
// Append a space if the output buffer ends with '<' and "str"
// starts with '<' to avoid <<<.
- if (str[0] == '<' && state->out_begin < state->out_cur &&
- state->out_cur[-1] == '<') {
+ if (str[0] == '<' && EndsWith(state, '<')) {
Append(state, " ", 1);
}
- // Remember the last identifier name for ctors/dtors.
- if (IsAlpha(str[0]) || str[0] == '_') {
- state->prev_name = state->out_cur;
- state->prev_name_length = length;
+ // Remember the last identifier name for ctors/dtors,
+ // but only if we haven't yet overflown the buffer.
+ if (state->parse_state.out_cur_idx < state->out_end_idx &&
+ (IsAlpha(str[0]) || str[0] == '_')) {
+ state->parse_state.prev_name_idx = state->parse_state.out_cur_idx;
+ state->parse_state.prev_name_length = static_cast<unsigned int>(length);
}
Append(state, str, length);
}
}
-// A convenient wrapper arount MaybeAppendWithLength().
-static bool MaybeAppend(State *state, const char * const str) {
- if (state->append) {
- int length = StrLen(str);
+// Appends a positive decimal number to the output if appending is enabled.
+static bool MaybeAppendDecimal(State* state, int val) {
+ // Max {32-64}-bit unsigned int is 20 digits.
+ constexpr size_t kMaxLength = 20;
+ char buf[kMaxLength];
+
+ // We can't use itoa or sprintf as neither is specified to be
+ // async-signal-safe.
+ if (state->parse_state.append) {
+ // We can't have a one-before-the-beginning pointer, so instead start with
+ // one-past-the-end and manipulate one character before the pointer.
+ char* p = &buf[kMaxLength];
+ do { // val=0 is the only input that should write a leading zero digit.
+ *--p = static_cast<char>((val % 10) + '0');
+ val /= 10;
+ } while (p > buf && val != 0);
+
+ // 'p' landed on the last character we set. How convenient.
+ Append(state, p, kMaxLength - static_cast<size_t>(p - buf));
+ }
+
+ return true;
+}
+
+// A convenient wrapper around MaybeAppendWithLength().
+// Returns true so that it can be placed in "if" conditions.
+static bool MaybeAppend(State* state, const char* const str) {
+ if (state->parse_state.append) {
+ size_t length = StrLen(str);
MaybeAppendWithLength(state, str, length);
}
return true;
@@ -307,80 +510,83 @@
// This function is used for handling nested names.
static bool EnterNestedName(State *state) {
- state->nest_level = 0;
+ state->parse_state.nest_level = 0;
return true;
}
// This function is used for handling nested names.
-static bool LeaveNestedName(State* state, int prev_value) {
- state->nest_level = prev_value;
+static bool LeaveNestedName(State* state, int16_t prev_value) {
+ state->parse_state.nest_level = prev_value;
return true;
}
// Disable the append mode not to print function parameters, etc.
static bool DisableAppend(State *state) {
- state->append = false;
+ state->parse_state.append = false;
return true;
}
// Restore the append mode to the previous state.
static bool RestoreAppend(State *state, bool prev_value) {
- state->append = prev_value;
+ state->parse_state.append = prev_value;
return true;
}
// Increase the nest level for nested names.
static void MaybeIncreaseNestLevel(State *state) {
- if (state->nest_level > -1) {
- ++state->nest_level;
+ if (state->parse_state.nest_level > -1) {
+ ++state->parse_state.nest_level;
}
}
// Appends :: for nested names if necessary.
static void MaybeAppendSeparator(State *state) {
- if (state->nest_level >= 1) {
+ if (state->parse_state.nest_level >= 1) {
MaybeAppend(state, "::");
}
}
// Cancel the last separator if necessary.
static void MaybeCancelLastSeparator(State *state) {
- if (state->nest_level >= 1 && state->append &&
- state->out_begin <= state->out_cur - 2) {
- state->out_cur -= 2;
- *state->out_cur = '\0';
+ if (state->parse_state.nest_level >= 1 && state->parse_state.append &&
+ state->parse_state.out_cur_idx >= 2) {
+ state->parse_state.out_cur_idx -= 2;
+ state->out[state->parse_state.out_cur_idx] = '\0';
}
}
-// Returns true if identifier pointed by "mangled_cur" is anonymous
-// namespace.
-static bool IdentifierIsAnonymousNamespace(State* state) {
- const char anon_prefix[] = "_GLOBAL__N_";
- return (state->number > sizeof(anon_prefix) - 1 && // Should be longer.
- StrPrefix(state->mangled_cur, anon_prefix));
+// Returns true if the identifier of the given length pointed to by
+// "mangled_cur" is anonymous namespace.
+static bool IdentifierIsAnonymousNamespace(State* state, size_t length) {
+ // Returns true if "anon_prefix" is a proper prefix of "mangled_cur".
+ static const char anon_prefix[] = "_GLOBAL__N_";
+ return (length > (sizeof(anon_prefix) - 1) &&
+ StrPrefix(RemainingInput(state), anon_prefix));
}
// Forward declarations of our parsing functions.
static bool ParseMangledName(State *state);
static bool ParseEncoding(State *state);
static bool ParseName(State *state);
-static bool ParseUnscopedName(State *state);
-static bool ParseUnscopedTemplateName(State *state);
+static bool ParseUnscopedName(State* state);
static bool ParseNestedName(State *state);
static bool ParsePrefix(State *state);
static bool ParseUnqualifiedName(State *state);
static bool ParseSourceName(State *state);
static bool ParseLocalSourceName(State *state);
-static bool ParseNumber(State* state);
+static bool ParseUnnamedTypeName(State* state);
+static bool ParseNumber(State *state, int *number_out);
static bool ParseFloatNumber(State *state);
static bool ParseSeqId(State *state);
-static bool ParseIdentifier(State* state);
-static bool ParseOperatorName(State *state);
+static bool ParseIdentifier(State* state, size_t length);
+static bool ParseOperatorName(State* state, int* arity);
static bool ParseSpecialName(State *state);
static bool ParseCallOffset(State *state);
static bool ParseNVOffset(State *state);
static bool ParseVOffset(State *state);
+static bool ParseAbiTags(State* state);
static bool ParseCtorDtorName(State *state);
+static bool ParseDecltype(State* state);
static bool ParseType(State *state);
static bool ParseCVQualifiers(State *state);
static bool ParseBuiltinType(State *state);
@@ -393,11 +599,15 @@
static bool ParseTemplateTemplateParam(State *state);
static bool ParseTemplateArgs(State *state);
static bool ParseTemplateArg(State *state);
+static bool ParseBaseUnresolvedName(State* state);
+static bool ParseUnresolvedName(State* state);
static bool ParseExpression(State *state);
static bool ParseExprPrimary(State *state);
+static bool ParseExprCastValue(State* state);
static bool ParseLocalName(State *state);
+static bool ParseLocalNameSuffix(State* state);
static bool ParseDiscriminator(State *state);
-static bool ParseSubstitution(State *state);
+static bool ParseSubstitution(State* state, bool accept_std);
// Implementation note: the following code is a straightforward
// translation of the Itanium C++ ABI defined in BNF with a couple of
@@ -409,11 +619,12 @@
// - Reorder patterns to give greedier functions precedence
// We'll mark "Less greedy than" for these cases in the code
//
-// Each parsing function changes the state and returns true on
-// success. Otherwise, don't change the state and returns false. To
-// ensure that the state isn't changed in the latter case, we save the
-// original state before we call more than one parsing functions
-// consecutively with &&, and restore the state if unsuccessful. See
+// Each parsing function changes the parse state and returns true on
+// success, or returns false and doesn't change the parse state (note:
+// the parse-steps counter increases regardless of success or failure).
+// To ensure that the parse state isn't changed in the latter case, we
+// save the original state before we call multiple parsing functions
+// consecutively with &&, and restore it if unsuccessful. See
// ParseEncoding() as an example of this convention. We follow the
// convention throughout the code.
//
@@ -427,34 +638,35 @@
//
// Reference:
// - Itanium C++ ABI
-// <http://www.codesourcery.com/cxx-abi/abi.html#mangling>
+// <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling>
// <mangled-name> ::= _Z <encoding>
static bool ParseMangledName(State *state) {
- if (ParseTwoChar(state, "_Z") && ParseEncoding(state)) {
- // Append trailing version suffix if any.
- // ex. _Z3foo@@GLIBCXX_3.4
- if (state->mangled_cur < state->mangled_end &&
- state->mangled_cur[0] == '@') {
- MaybeAppend(state, state->mangled_cur);
- state->mangled_cur = state->mangled_end;
- }
- return true;
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
}
- return false;
+ return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
}
// <encoding> ::= <(function) name> <bare-function-type>
// ::= <(data) name>
// ::= <special-name>
static bool ParseEncoding(State *state) {
- State copy = *state;
- if (ParseName(state) && ParseBareFunctionType(state)) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ // Implementing the first two productions together as <name>
+ // [<bare-function-type>] avoids exponential blowup of backtracking.
+ //
+ // Since Optional(...) can't fail, there's no need to copy the state for
+ // backtracking.
+ if (ParseName(state) && Optional(ParseBareFunctionType(state))) {
return true;
}
- *state = copy;
- if (ParseName(state) || ParseSpecialName(state)) {
+ if (ParseSpecialName(state)) {
return true;
}
return false;
@@ -465,56 +677,79 @@
// ::= <unscoped-name>
// ::= <local-name>
static bool ParseName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
if (ParseNestedName(state) || ParseLocalName(state)) {
return true;
}
- State copy = *state;
- if (ParseUnscopedTemplateName(state) &&
+ // We reorganize the productions to avoid re-parsing unscoped names.
+ // - Inline <unscoped-template-name> productions:
+ // <name> ::= <substitution> <template-args>
+ // ::= <unscoped-name> <template-args>
+ // ::= <unscoped-name>
+ // - Merge the two productions that start with unscoped-name:
+ // <name> ::= <unscoped-name> [<template-args>]
+
+ ParseState copy = state->parse_state;
+ // "std<...>" isn't a valid name.
+ if (ParseSubstitution(state, /*accept_std=*/false) &&
ParseTemplateArgs(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- // Less greedy than <unscoped-template-name> <template-args>.
- if (ParseUnscopedName(state)) {
- return true;
- }
- return false;
+ // Note there's no need to restore state after this since only the first
+ // subparser can fail.
+ return ParseUnscopedName(state) && Optional(ParseTemplateArgs(state));
}
// <unscoped-name> ::= <unqualified-name>
// ::= St <unqualified-name>
static bool ParseUnscopedName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
if (ParseUnqualifiedName(state)) {
return true;
}
- State copy = *state;
- if (ParseTwoChar(state, "St") && MaybeAppend(state, "std::") &&
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "St") && MaybeAppend(state, "std::") &&
ParseUnqualifiedName(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
-// <unscoped-template-name> ::= <unscoped-name>
-// ::= <substitution>
-static bool ParseUnscopedTemplateName(State *state) {
- return ParseUnscopedName(state) || ParseSubstitution(state);
+// <ref-qualifier> ::= R // lvalue method reference qualifier
+// ::= O // rvalue method reference qualifier
+static inline bool ParseRefQualifier(State* state) {
+ return ParseCharClass(state, "OR");
}
-// <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
-// ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+// <nested-name> ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix>
+// <unqualified-name> E
+// ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
+// <template-args> E
static bool ParseNestedName(State *state) {
- State copy = *state;
- if (ParseChar(state, 'N') && EnterNestedName(state) &&
- Optional(ParseCVQualifiers(state)) && ParsePrefix(state) &&
- LeaveNestedName(state, copy.nest_level) && ParseChar(state, 'E')) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
+ Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseRefQualifier(state)) && ParsePrefix(state) &&
+ LeaveNestedName(state, copy.nest_level) &&
+ ParseOneCharToken(state, 'E')) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
@@ -530,12 +765,17 @@
// ::= <template-param>
// ::= <substitution>
static bool ParsePrefix(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
bool has_something = false;
while (true) {
MaybeAppendSeparator(state);
if (ParseTemplateParam(state) ||
- ParseSubstitution(state) ||
- ParseUnscopedName(state)) {
+ ParseSubstitution(state, /*accept_std=*/true) ||
+ ParseUnscopedName(state) ||
+ (ParseOneCharToken(state, 'M') && ParseUnnamedTypeName(state))) {
has_something = true;
MaybeIncreaseNestLevel(state);
continue;
@@ -550,60 +790,158 @@
return true;
}
-// <unqualified-name> ::= <operator-name>
-// ::= <ctor-dtor-name>
-// ::= <source-name>
-// ::= <local-source-name>
+// <unqualified-name> ::= <operator-name> [<abi-tags>]
+// ::= <ctor-dtor-name> [<abi-tags>]
+// ::= <source-name> [<abi-tags>]
+// ::= <local-source-name> [<abi-tags>]
+// ::= <unnamed-type-name> [<abi-tags>]
+//
+// <local-source-name> is a GCC extension; see below.
static bool ParseUnqualifiedName(State *state) {
- return (ParseOperatorName(state) ||
- ParseCtorDtorName(state) ||
- ParseSourceName(state) ||
- ParseLocalSourceName(state));
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ if (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) ||
+ ParseSourceName(state) || ParseLocalSourceName(state) ||
+ ParseUnnamedTypeName(state)) {
+ return ParseAbiTags(state);
+ }
+ return false;
+}
+
+// <abi-tags> ::= <abi-tag> [<abi-tags>]
+// <abi-tag> ::= B <source-name>
+static bool ParseAbiTags(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+
+ while (ParseOneCharToken(state, 'B')) {
+ ParseState copy = state->parse_state;
+ MaybeAppend(state, "[abi:");
+
+ if (!ParseSourceName(state)) {
+ state->parse_state = copy;
+ return false;
+ }
+ MaybeAppend(state, "]");
+ }
+
+ return true;
}
// <source-name> ::= <positive length number> <identifier>
static bool ParseSourceName(State *state) {
- State copy = *state;
- if (ParseNumber(state) && ParseIdentifier(state)) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ int length = -1;
+ if (ParseNumber(state, &length) &&
+ ParseIdentifier(state, static_cast<size_t>(length))) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <local-source-name> ::= L <source-name> [<discriminator>]
//
// References:
-// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=31775
-// http://gcc.gnu.org/viewcvs?view=rev&revision=124467
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=31775
+// https://gcc.gnu.org/viewcvs?view=rev&revision=124467
static bool ParseLocalSourceName(State *state) {
- State copy = *state;
- if (ParseChar(state, 'L') && ParseSourceName(state) &&
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
Optional(ParseDiscriminator(state))) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
+ return false;
+}
+
+// <unnamed-type-name> ::= Ut [<(nonnegative) number>] _
+// ::= <closure-type-name>
+// <closure-type-name> ::= Ul <lambda-sig> E [<(nonnegative) number>] _
+// <lambda-sig> ::= <(parameter) type>+
+static bool ParseUnnamedTypeName(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ // Type's 1-based index n is encoded as { "", n == 1; itoa(n-2), otherwise }.
+ // Optionally parse the encoded value into 'which' and add 2 to get the index.
+ int which = -1;
+
+ // Unnamed type local to function or class.
+ if (ParseTwoCharToken(state, "Ut") && Optional(ParseNumber(state, &which)) &&
+ which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "{unnamed type#");
+ MaybeAppendDecimal(state, 2 + which);
+ MaybeAppend(state, "}");
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Closure type.
+ which = -1;
+ if (ParseTwoCharToken(state, "Ul") && DisableAppend(state) &&
+ OneOrMore(ParseType, state) && RestoreAppend(state, copy.append) &&
+ ParseOneCharToken(state, 'E') && Optional(ParseNumber(state, &which)) &&
+ which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "{lambda()#");
+ MaybeAppendDecimal(state, 2 + which);
+ MaybeAppend(state, "}");
+ return true;
+ }
+ state->parse_state = copy;
+
return false;
}
// <number> ::= [n] <non-negative decimal integer>
-static bool ParseNumber(State* state) {
- int sign = 1;
- if (ParseChar(state, 'n')) {
- sign = -1;
+// If "number_out" is non-null, then *number_out is set to the value of the
+// parsed number on success.
+static bool ParseNumber(State *state, int *number_out) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
}
- const char *p = state->mangled_cur;
- int number = 0;
- for (; p < state->mangled_end; ++p) {
- if ((*p >= '0' && *p <= '9')) {
- number = number * 10 + (*p - '0');
+ bool negative = false;
+ if (ParseOneCharToken(state, 'n')) {
+ negative = true;
+ }
+ const char* p = RemainingInput(state);
+ uint64_t number = 0;
+ for (; *p != '\0'; ++p) {
+ if (IsDigit(*p)) {
+ number = number * 10 + static_cast<uint64_t>(*p - '0');
} else {
break;
}
}
- if (p != state->mangled_cur) { // Conversion succeeded.
- state->mangled_cur = p;
- state->number = number * sign;
+ // Apply the sign with uint64_t arithmetic so overflows aren't UB. Gives
+ // "incorrect" results for out-of-range inputs, but negative values only
+ // appear for literals, which aren't printed.
+ if (negative) {
+ number = ~number + 1;
+ }
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
+ if (number_out != nullptr) {
+ // Note: possibly truncate "number".
+ *number_out = static_cast<int>(number);
+ }
return true;
}
return false;
@@ -612,20 +950,18 @@
// Floating-point literals are encoded using a fixed-length lowercase
// hexadecimal string.
static bool ParseFloatNumber(State *state) {
- const char *p = state->mangled_cur;
- int number = 0;
- for (; p < state->mangled_end; ++p) {
- if ((*p >= '0' && *p <= '9')) {
- number = number * 16 + (*p - '0');
- } else if (*p >= 'a' && *p <= 'f') {
- number = number * 16 + (*p - 'a' + 10);
- } else {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ const char* p = RemainingInput(state);
+ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
break;
}
}
- if (p != state->mangled_cur) { // Conversion succeeded.
- state->mangled_cur = p;
- state->number = number;
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
return true;
}
return false;
@@ -634,80 +970,91 @@
// The <seq-id> is a sequence number in base 36,
// using digits and upper case letters
static bool ParseSeqId(State *state) {
- const char *p = state->mangled_cur;
- int number = 0;
- for (; p < state->mangled_end; ++p) {
- if ((*p >= '0' && *p <= '9')) {
- number = number * 36 + (*p - '0');
- } else if (*p >= 'A' && *p <= 'Z') {
- number = number * 36 + (*p - 'A' + 10);
- } else {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ const char* p = RemainingInput(state);
+ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
break;
}
}
- if (p != state->mangled_cur) { // Conversion succeeded.
- state->mangled_cur = p;
- state->number = number;
+ if (p != RemainingInput(state)) { // Conversion succeeded.
+ state->parse_state.mangled_idx += p - RemainingInput(state);
return true;
}
return false;
}
-// <identifier> ::= <unqualified source code identifier>
-static bool ParseIdentifier(State* state) {
- if (state->number == -1 || RemainingLength(state) < state->number) {
+// <identifier> ::= <unqualified source code identifier> (of given length)
+static bool ParseIdentifier(State* state, size_t length) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
return false;
}
- if (IdentifierIsAnonymousNamespace(state)) {
+ if (!AtLeastNumCharsRemaining(RemainingInput(state), length)) {
+ return false;
+ }
+ if (IdentifierIsAnonymousNamespace(state, length)) {
MaybeAppend(state, "(anonymous namespace)");
} else {
- MaybeAppendWithLength(state, state->mangled_cur, state->number);
+ MaybeAppendWithLength(state, RemainingInput(state), length);
}
- state->mangled_cur += state->number;
- state->number = -1; // Reset the number.
+ state->parse_state.mangled_idx += length;
return true;
}
// <operator-name> ::= nw, and other two letters cases
// ::= cv <type> # (cast)
// ::= v <digit> <source-name> # vendor extended operator
-static bool ParseOperatorName(State *state) {
- if (RemainingLength(state) < 2) {
+static bool ParseOperatorName(State* state, int* arity) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ if (!AtLeastNumCharsRemaining(RemainingInput(state), 2)) {
return false;
}
// First check with "cv" (cast) case.
- State copy = *state;
- if (ParseTwoChar(state, "cv") && MaybeAppend(state, "operator ") &&
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
EnterNestedName(state) && ParseType(state) &&
LeaveNestedName(state, copy.nest_level)) {
+ if (arity != nullptr) {
+ *arity = 1;
+ }
return true;
}
- *state = copy;
+ state->parse_state = copy;
// Then vendor extended operators.
- if (ParseChar(state, 'v') && ParseCharClass(state, "0123456789") &&
+ if (ParseOneCharToken(state, 'v') && ParseDigit(state, arity) &&
ParseSourceName(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
// Other operator names should start with a lower alphabet followed
// by a lower/upper alphabet.
- if (!(IsLower(state->mangled_cur[0]) &&
- IsAlpha(state->mangled_cur[1]))) {
+ if (!(IsLower(RemainingInput(state)[0]) &&
+ IsAlpha(RemainingInput(state)[1]))) {
return false;
}
// We may want to perform a binary search if we really need speed.
const AbbrevPair *p;
- for (p = kOperatorList; p->abbrev != NULL; ++p) {
- if (state->mangled_cur[0] == p->abbrev[0] &&
- state->mangled_cur[1] == p->abbrev[1]) {
+ for (p = kOperatorList; p->abbrev != nullptr; ++p) {
+ if (RemainingInput(state)[0] == p->abbrev[0] &&
+ RemainingInput(state)[1] == p->abbrev[1]) {
+ if (arity != nullptr) {
+ *arity = p->arity;
+ }
MaybeAppend(state, "operator");
if (IsLower(*p->real_name)) { // new, delete, etc.
MaybeAppend(state, " ");
}
MaybeAppend(state, p->real_name);
- state->mangled_cur += 2;
+ state->parse_state.mangled_idx += 2;
return true;
}
}
@@ -718,6 +1065,7 @@
// ::= TT <type>
// ::= TI <type>
// ::= TS <type>
+// ::= TH <type> # thread-local
// ::= Tc <call-offset> <call-offset> <(base) encoding>
// ::= GV <(object) name>
// ::= T <call-offset> <(base) encoding>
@@ -733,168 +1081,267 @@
// Note: we don't care much about them since they don't appear in
// stack traces. They are special data.
static bool ParseSpecialName(State *state) {
- State copy = *state;
- if (ParseChar(state, 'T') && ParseCharClass(state, "VTIS") &&
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTISH") &&
ParseType(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseTwoChar(state, "Tc") && ParseCallOffset(state) &&
+ if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseTwoChar(state, "GV") && ParseName(state)) {
+ if (ParseTwoCharToken(state, "GV") && ParseName(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseChar(state, 'T') && ParseCallOffset(state) && ParseEncoding(state)) {
+ if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
+ ParseEncoding(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
// G++ extensions
- if (ParseTwoChar(state, "TC") && ParseType(state) && ParseNumber(state) &&
- ParseChar(state, '_') && DisableAppend(state) && ParseType(state)) {
+ if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
+ ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
+ DisableAppend(state) && ParseType(state)) {
RestoreAppend(state, copy.append);
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseChar(state, 'T') && ParseCharClass(state, "FJ") &&
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
ParseType(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseTwoChar(state, "GR") && ParseName(state)) {
+ if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseTwoChar(state, "GA") && ParseEncoding(state)) {
+ if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseChar(state, 'T') && ParseCharClass(state, "hv") &&
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <call-offset> ::= h <nv-offset> _
// ::= v <v-offset> _
static bool ParseCallOffset(State *state) {
- State copy = *state;
- if (ParseChar(state, 'h') && ParseNVOffset(state) && ParseChar(state, '_')) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
+ ParseOneCharToken(state, '_')) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseChar(state, 'v') && ParseVOffset(state) && ParseChar(state, '_')) {
+ if (ParseOneCharToken(state, 'v') && ParseVOffset(state) &&
+ ParseOneCharToken(state, '_')) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <nv-offset> ::= <(offset) number>
static bool ParseNVOffset(State *state) {
- return ParseNumber(state);
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ return ParseNumber(state, nullptr);
}
// <v-offset> ::= <(offset) number> _ <(virtual offset) number>
static bool ParseVOffset(State *state) {
- State copy = *state;
- if (ParseNumber(state) && ParseChar(state, '_') && ParseNumber(state)) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
+ ParseNumber(state, nullptr)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
-// <ctor-dtor-name> ::= C1 | C2 | C3
+// <ctor-dtor-name> ::= C1 | C2 | C3 | CI1 <base-class-type> | CI2
+// <base-class-type>
// ::= D0 | D1 | D2
+// # GCC extensions: "unified" constructor/destructor. See
+// #
+// https://github.com/gcc-mirror/gcc/blob/7ad17b583c3643bd4557f29b8391ca7ef08391f5/gcc/cp/mangle.c#L1847
+// ::= C4 | D4
static bool ParseCtorDtorName(State *state) {
- State copy = *state;
- if (ParseChar(state, 'C') && ParseCharClass(state, "123")) {
- const char * const prev_name = state->prev_name;
- const int prev_name_length = state->prev_name_length;
- MaybeAppendWithLength(state, prev_name, prev_name_length);
- return true;
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
}
- *state = copy;
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'C')) {
+ if (ParseCharClass(state, "1234")) {
+ const char* const prev_name =
+ state->out + state->parse_state.prev_name_idx;
+ MaybeAppendWithLength(state, prev_name,
+ state->parse_state.prev_name_length);
+ return true;
+ } else if (ParseOneCharToken(state, 'I') && ParseCharClass(state, "12") &&
+ ParseClassEnumType(state)) {
+ return true;
+ }
+ }
+ state->parse_state = copy;
- if (ParseChar(state, 'D') && ParseCharClass(state, "012")) {
- const char * const prev_name = state->prev_name;
- const int prev_name_length = state->prev_name_length;
+ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "0124")) {
+ const char* const prev_name = state->out + state->parse_state.prev_name_idx;
MaybeAppend(state, "~");
- MaybeAppendWithLength(state, prev_name, prev_name_length);
+ MaybeAppendWithLength(state, prev_name,
+ state->parse_state.prev_name_length);
return true;
}
- *state = copy;
+ state->parse_state = copy;
+ return false;
+}
+
+// <decltype> ::= Dt <expression> E # decltype of an id-expression or class
+// # member access (C++0x)
+// ::= DT <expression> E # decltype of an expression (C++0x)
+static bool ParseDecltype(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
+ ParseExpression(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
return false;
}
// <type> ::= <CV-qualifiers> <type>
-// ::= P <type>
-// ::= R <type>
-// ::= C <type>
-// ::= G <type>
-// ::= U <source-name> <type>
+// ::= P <type> # pointer-to
+// ::= R <type> # reference-to
+// ::= O <type> # rvalue reference-to (C++0x)
+// ::= C <type> # complex pair (C 2000)
+// ::= G <type> # imaginary (C 2000)
+// ::= U <source-name> <type> # vendor extended type qualifier
// ::= <builtin-type>
// ::= <function-type>
-// ::= <class-enum-type>
+// ::= <class-enum-type> # note: just an alias for <name>
// ::= <array-type>
// ::= <pointer-to-member-type>
// ::= <template-template-param> <template-args>
// ::= <template-param>
+// ::= <decltype>
// ::= <substitution>
+// ::= Dp <type> # pack expansion of (C++0x)
+// ::= Dv <num-elems> _ # GNU vector extension
+//
static bool ParseType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+
// We should check CV-qualifiers, and PRGC things first.
- State copy = *state;
- if (ParseCVQualifiers(state) && ParseType(state)) {
+ //
+ // CV-qualifiers overlap with some operator names, but an operator name is not
+ // valid as a type. To avoid an ambiguity that can lead to exponential time
+ // complexity, refuse to backtrack the CV-qualifiers.
+ //
+ // _Z4aoeuIrMvvE
+ // => _Z 4aoeuI rM v v E
+ // aoeu<operator%=, void, void>
+ // => _Z 4aoeuI r Mv v E
+ // aoeu<void void::* restrict>
+ //
+ // By consuming the CV-qualifiers first, the former parse is disabled.
+ if (ParseCVQualifiers(state)) {
+ const bool result = ParseType(state);
+ if (!result) {
+ state->parse_state = copy;
+ }
+ return result;
+ }
+ state->parse_state = copy;
+
+ // Similarly, these tag characters can overlap with other <name>s resulting in
+ // two different parse prefixes that land on <template-args> in the same
+ // place, such as "C3r1xI...". So, disable the "ctor-name = C3" parse by
+ // refusing to backtrack the tag characters.
+ if (ParseCharClass(state, "OPRCG")) {
+ const bool result = ParseType(state);
+ if (!result) {
+ state->parse_state = copy;
+ }
+ return result;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseCharClass(state, "PRCG") && ParseType(state)) {
+ if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
+ ParseType(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseChar(state, 'U') && ParseSourceName(state) && ParseType(state)) {
- return true;
- }
- *state = copy;
-
- if (ParseBuiltinType(state) ||
- ParseFunctionType(state) ||
- ParseClassEnumType(state) ||
- ParseArrayType(state) ||
- ParsePointerToMemberType(state) ||
- ParseSubstitution(state)) {
+ if (ParseBuiltinType(state) || ParseFunctionType(state) ||
+ ParseClassEnumType(state) || ParseArrayType(state) ||
+ ParsePointerToMemberType(state) || ParseDecltype(state) ||
+ // "std" on its own isn't a type.
+ ParseSubstitution(state, /*accept_std=*/false)) {
return true;
}
- if (ParseTemplateTemplateParam(state) &&
- ParseTemplateArgs(state)) {
+ if (ParseTemplateTemplateParam(state) && ParseTemplateArgs(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
// Less greedy than <template-template-param> <template-args>.
if (ParseTemplateParam(state)) {
return true;
}
+ if (ParseTwoCharToken(state, "Dv") && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
return false;
}
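
The two blocks above follow the same commit-after-prefix pattern: once the CV-qualifiers or one of the "OPRCG" tag characters has been consumed, ParseType either succeeds or fails with the nested <type>; it never falls through to the remaining alternatives, which is what removes the exponential backtracking described in the comments. Below is a minimal, hypothetical standalone sketch of that pattern (not part of the imported code); ParsePrefix, ParseRest, ParseFallback and MiniState are made-up stand-ins for the real productions and ParseState.

struct MiniState {
  const char* p;  // current position in the input
};

static bool ParsePrefix(MiniState* s) {    // e.g. a CV-qualifier
  if (*s->p == 'K') { ++s->p; return true; }
  return false;
}
static bool ParseRest(MiniState* s) {      // the <type> that must follow
  if (*s->p == 'i') { ++s->p; return true; }
  return false;
}
static bool ParseFallback(MiniState* s) {  // every other alternative
  if (*s->p == 'v') { ++s->p; return true; }
  return false;
}

static bool ParseThing(MiniState* s) {
  MiniState copy = *s;
  if (ParsePrefix(s)) {
    const bool ok = ParseRest(s);
    if (!ok) *s = copy;  // rewind the consumed prefix...
    return ok;           // ...but never retry the other alternatives
  }
  *s = copy;
  return ParseFallback(s);
}
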
@@ -902,200 +1349,540 @@
// We don't allow empty <CV-qualifiers> to avoid an infinite loop in
// ParseType().
static bool ParseCVQualifiers(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
int num_cv_qualifiers = 0;
- num_cv_qualifiers += ParseChar(state, 'r');
- num_cv_qualifiers += ParseChar(state, 'V');
- num_cv_qualifiers += ParseChar(state, 'K');
+ num_cv_qualifiers += ParseOneCharToken(state, 'r');
+ num_cv_qualifiers += ParseOneCharToken(state, 'V');
+ num_cv_qualifiers += ParseOneCharToken(state, 'K');
return num_cv_qualifiers > 0;
}
-// <builtin-type> ::= v, etc.
+// <builtin-type> ::= v, etc. # single-character builtin types
// ::= u <source-name>
+// ::= Dd, etc. # two-character builtin types
+//
+// Not supported:
+// ::= DF <number> _ # _FloatN (N bits)
+//
static bool ParseBuiltinType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
const AbbrevPair *p;
- for (p = kBuiltinTypeList; p->abbrev != NULL; ++p) {
- if (state->mangled_cur[0] == p->abbrev[0]) {
+ for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
+ // Guaranteed only 1- or 2-character strings in kBuiltinTypeList.
+ if (p->abbrev[1] == '\0') {
+ if (ParseOneCharToken(state, p->abbrev[0])) {
+ MaybeAppend(state, p->real_name);
+ return true;
+ }
+ } else if (p->abbrev[2] == '\0' && ParseTwoCharToken(state, p->abbrev)) {
MaybeAppend(state, p->real_name);
- ++state->mangled_cur;
return true;
}
}
- State copy = *state;
- if (ParseChar(state, 'u') && ParseSourceName(state)) {
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
-// <function-type> ::= F [Y] <bare-function-type> E
-static bool ParseFunctionType(State *state) {
- State copy = *state;
- if (ParseChar(state, 'F') && Optional(ParseChar(state, 'Y')) &&
- ParseBareFunctionType(state) && ParseChar(state, 'E')) {
+// <exception-spec> ::= Do # non-throwing
+// exception-specification (e.g.,
+// noexcept, throw())
+// ::= DO <expression> E # computed (instantiation-dependent)
+// noexcept
+// ::= Dw <type>+ E # dynamic exception specification
+// with instantiation-dependent types
+static bool ParseExceptionSpec(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+
+ if (ParseTwoCharToken(state, "Do")) {
return true;
}
- *state = copy;
+
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "DO") && ParseExpression(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+ if (ParseTwoCharToken(state, "Dw") && OneOrMore(ParseType, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <function-type> ::= [exception-spec] F [Y] <bare-function-type> [O] E
+static bool ParseFunctionType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (Optional(ParseExceptionSpec(state)) && ParseOneCharToken(state, 'F') &&
+ Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
+ Optional(ParseOneCharToken(state, 'O')) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
return false;
}
// <bare-function-type> ::= <(signature) type>+
static bool ParseBareFunctionType(State *state) {
- State copy = *state;
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
DisableAppend(state);
if (OneOrMore(ParseType, state)) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "()");
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <class-enum-type> ::= <name>
static bool ParseClassEnumType(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
return ParseName(state);
}
// <array-type> ::= A <(positive dimension) number> _ <(element) type>
// ::= A [<(dimension) expression>] _ <(element) type>
static bool ParseArrayType(State *state) {
- State copy = *state;
- if (ParseChar(state, 'A') && ParseNumber(state) && ParseChar(state, '_') &&
- ParseType(state)) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'A') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseChar(state, 'A') && Optional(ParseExpression(state)) &&
- ParseChar(state, '_') && ParseType(state)) {
+ if (ParseOneCharToken(state, 'A') && Optional(ParseExpression(state)) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <pointer-to-member-type> ::= M <(class) type> <(member) type>
static bool ParsePointerToMemberType(State *state) {
- State copy = *state;
- if (ParseChar(state, 'M') && ParseType(state) && ParseType(state)) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'M') && ParseType(state) && ParseType(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <template-param> ::= T_
// ::= T <parameter-2 non-negative number> _
static bool ParseTemplateParam(State *state) {
- if (ParseTwoChar(state, "T_")) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ if (ParseTwoCharToken(state, "T_")) {
MaybeAppend(state, "?"); // We don't support template substitutions.
return true;
}
- State copy = *state;
- if (ParseChar(state, 'T') && ParseNumber(state) && ParseChar(state, '_')) {
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?"); // We don't support template substitutions.
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
-
// <template-template-param> ::= <template-param>
// ::= <substitution>
static bool ParseTemplateTemplateParam(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
return (ParseTemplateParam(state) ||
- ParseSubstitution(state));
+ // "std" on its own isn't a template.
+ ParseSubstitution(state, /*accept_std=*/false));
}
// <template-args> ::= I <template-arg>+ E
static bool ParseTemplateArgs(State *state) {
- State copy = *state;
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
DisableAppend(state);
- if (ParseChar(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
- ParseChar(state, 'E')) {
+ if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "<>");
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <template-arg> ::= <type>
// ::= <expr-primary>
+// ::= J <template-arg>* E # argument pack
// ::= X <expression> E
static bool ParseTemplateArg(State *state) {
- if (ParseType(state) ||
- ParseExprPrimary(state)) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'J') && ZeroOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // There can be significant overlap between the following leading to
+ // exponential backtracking:
+ //
+ // <expr-primary> ::= L <type> <expr-cast-value> E
+ // e.g. L 2xxIvE 1 E
+ // <type> ==> <local-source-name> <template-args>
+ // e.g. L 2xx IvE
+ //
+ // This means parsing an entire <type> twice, and <type> can contain
+ // <template-arg>, so this can generate exponential backtracking. There is
+ // only overlap when the remaining input starts with "L <source-name>", so
+ // parse all cases that can start this way jointly to share the common prefix.
+ //
+ // We have:
+ //
+ // <template-arg> ::= <type>
+ // ::= <expr-primary>
+ //
+ // First, drop all the productions of <type> that must start with something
+ // other than 'L'. All that's left is <class-enum-type>; inline it.
+ //
+ // <type> ::= <nested-name> # starts with 'N'
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name> # starts with 'Z'
+ //
+ // Drop and inline again:
+ //
+ // <type> ::= <unscoped-name>
+ // ::= <unscoped-name> <template-args>
+ // ::= <substitution> <template-args> # starts with 'S'
+ //
+ // Merge the first two, inline <unscoped-name>, drop last:
+ //
+ // <type> ::= <unqualified-name> [<template-args>]
+ // ::= St <unqualified-name> [<template-args>] # starts with 'S'
+ //
+ // Drop and inline:
+ //
+ // <type> ::= <operator-name> [<template-args>] # starts with lowercase
+ // ::= <ctor-dtor-name> [<template-args>] # starts with 'C' or 'D'
+ // ::= <source-name> [<template-args>] # starts with digit
+ // ::= <local-source-name> [<template-args>]
+ // ::= <unnamed-type-name> [<template-args>] # starts with 'U'
+ //
+ // One more time:
+ //
+ // <type> ::= L <source-name> [<template-args>]
+ //
+ // Likewise with <expr-primary>:
+ //
+ // <expr-primary> ::= L <type> <expr-cast-value> E
+ // ::= LZ <encoding> E # cannot overlap; drop
+ // ::= L <mangled_name> E # cannot overlap; drop
+ //
+ // By similar reasoning as shown above, the only <type>s starting with
+ // <source-name> are "<source-name> [<template-args>]". Inline this.
+ //
+ // <expr-primary> ::= L <source-name> [<template-args>] <expr-cast-value> E
+ //
+ // Now inline both of these into <template-arg>:
+ //
+ // <template-arg> ::= L <source-name> [<template-args>]
+ // ::= L <source-name> [<template-args>] <expr-cast-value> E
+ //
+ // Merge them and we're done:
+ // <template-arg>
+ // ::= L <source-name> [<template-args>] [<expr-cast-value> E]
+ if (ParseLocalSourceName(state) && Optional(ParseTemplateArgs(state))) {
+ copy = state->parse_state;
+ if (ParseExprCastValue(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
return true;
}
- State copy = *state;
- if (ParseChar(state, 'X') && ParseExpression(state) &&
- ParseChar(state, 'E')) {
+ // Now that the overlapping cases can't reach this code, we can safely call
+ // both of these.
+ if (ParseType(state) || ParseExprPrimary(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
return false;
}
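
The long comment above folds two productions that share the prefix "L <source-name> [<template-args>]" into one, so the shared prefix is parsed exactly once; ParseLocalName further below applies the same factoring to "Z <encoding> E". A hypothetical standalone sketch of that shared-prefix factoring (not part of the imported code), with ParseP, ParseX and ParseY standing in for the shared prefix and the two suffixes:

struct TinyState {
  const char* p;  // current position in the input
};

static bool ParseP(TinyState* s) { if (*s->p == 'L') { ++s->p; return true; } return false; }
static bool ParseX(TinyState* s) { if (*s->p == 'x') { ++s->p; return true; } return false; }
static bool ParseY(TinyState* s) { if (*s->p == 'y') { ++s->p; return true; } return false; }

// A ::= P X | P Y, parsed without re-reading P when X fails.
static bool ParseA(TinyState* s) {
  TinyState copy = *s;
  if (!ParseP(s)) {
    *s = copy;
    return false;
  }
  TinyState after_prefix = *s;
  if (ParseX(s)) return true;
  *s = after_prefix;  // retry only the suffix, not the shared prefix
  return ParseY(s);
}
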
-// <expression> ::= <template-param>
-// ::= <expr-primary>
-// ::= <unary operator-name> <expression>
-// ::= <binary operator-name> <expression> <expression>
-// ::= <trinary operator-name> <expression> <expression>
-// <expression>
+// <unresolved-type> ::= <template-param> [<template-args>]
+// ::= <decltype>
+// ::= <substitution>
+static inline bool ParseUnresolvedType(State* state) {
+ // No ComplexityGuard because we don't copy the state in this stack frame.
+ return (ParseTemplateParam(state) && Optional(ParseTemplateArgs(state))) ||
+ ParseDecltype(state) || ParseSubstitution(state, /*accept_std=*/false);
+}
+
+// <simple-id> ::= <source-name> [<template-args>]
+static inline bool ParseSimpleId(State* state) {
+ // No ComplexityGuard because we don't copy the state in this stack frame.
+
+ // Note: <simple-id> cannot be followed by a parameter pack; see comment in
+ // ParseUnresolvedType.
+ return ParseSourceName(state) && Optional(ParseTemplateArgs(state));
+}
+
+// <base-unresolved-name> ::= <source-name> [<template-args>]
+// ::= on <operator-name> [<template-args>]
+// ::= dn <destructor-name>
+static bool ParseBaseUnresolvedName(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+
+ if (ParseSimpleId(state)) {
+ return true;
+ }
+
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "on") && ParseOperatorName(state, nullptr) &&
+ Optional(ParseTemplateArgs(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "dn") &&
+ (ParseUnresolvedType(state) || ParseSimpleId(state))) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <unresolved-name> ::= [gs] <base-unresolved-name>
+// ::= sr <unresolved-type> <base-unresolved-name>
+// ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
+// <base-unresolved-name>
+// ::= [gs] sr <unresolved-qualifier-level>+ E
+// <base-unresolved-name>
+static bool ParseUnresolvedName(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+
+ ParseState copy = state->parse_state;
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+ ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "sr") && ParseUnresolvedType(state) &&
+ ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "sr") && ParseOneCharToken(state, 'N') &&
+ ParseUnresolvedType(state) &&
+ OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+ ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+ ParseTwoCharToken(state, "sr") &&
+ OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
+ ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <expression> ::= <1-ary operator-name> <expression>
+// ::= <2-ary operator-name> <expression> <expression>
+// ::= <3-ary operator-name> <expression> <expression> <expression>
+// ::= cl <expression>+ E
+// ::= cp <simple-id> <expression>* E # Clang-specific.
+// ::= cv <type> <expression> # type (expression)
+// ::= cv <type> _ <expression>* E # type (expr-list)
// ::= st <type>
+// ::= <template-param>
+// ::= <function-param>
+// ::= <expr-primary>
+// ::= dt <expression> <unresolved-name> # expr.name
+// ::= pt <expression> <unresolved-name> # expr->name
+// ::= sp <expression> # argument pack expansion
// ::= sr <type> <unqualified-name> <template-args>
// ::= sr <type> <unqualified-name>
+// <function-param> ::= fp <(top-level) CV-qualifiers> _
+// ::= fp <(top-level) CV-qualifiers> <number> _
+// ::= fL <number> p <(top-level) CV-qualifiers> _
+// ::= fL <number> p <(top-level) CV-qualifiers> <number> _
static bool ParseExpression(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
return true;
}
- State copy = *state;
- if (ParseOperatorName(state) &&
- ParseExpression(state) &&
- ParseExpression(state) &&
+ ParseState copy = state->parse_state;
+
+ // Object/function call expression.
+ if (ParseTwoCharToken(state, "cl") && OneOrMore(ParseExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Clang-specific "cp <simple-id> <expression>* E"
+ // https://clang.llvm.org/doxygen/ItaniumMangle_8cpp_source.html#l04338
+ if (ParseTwoCharToken(state, "cp") && ParseSimpleId(state) &&
+ ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Function-param expression (level 0).
+ if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Function-param expression (level 1+).
+ if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
+ ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
+ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Parse the conversion expressions jointly to avoid re-parsing the <type> in
+ // their common prefix. Parsed as:
+ // <expression> ::= cv <type> <conversion-args>
+ // <conversion-args> ::= _ <expression>* E
+ // ::= <expression>
+ //
+ // Also don't try ParseOperatorName after seeing "cv", since ParseOperatorName
+ // also needs to accept "cv <type>" in other contexts.
+ if (ParseTwoCharToken(state, "cv")) {
+ if (ParseType(state)) {
+ ParseState copy2 = state->parse_state;
+ if (ParseOneCharToken(state, '_') && ZeroOrMore(ParseExpression, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy2;
+ if (ParseExpression(state)) {
+ return true;
+ }
+ }
+ } else {
+ // Parse unary, binary, and ternary operator expressions jointly, taking
+ // care not to re-parse subexpressions repeatedly. Parse like:
+ // <expression> ::= <operator-name> <expression>
+ // [<one-to-two-expressions>]
+ // <one-to-two-expressions> ::= <expression> [<expression>]
+ int arity = -1;
+ if (ParseOperatorName(state, &arity) &&
+ arity > 0 && // 0 arity => disabled.
+ (arity < 3 || ParseExpression(state)) &&
+ (arity < 2 || ParseExpression(state)) &&
+ (arity < 1 || ParseExpression(state))) {
+ return true;
+ }
+ }
+ state->parse_state = copy;
+
+ // sizeof type
+ if (ParseTwoCharToken(state, "st") && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Object and pointer member access expressions.
+ if ((ParseTwoCharToken(state, "dt") || ParseTwoCharToken(state, "pt")) &&
+ ParseExpression(state) && ParseType(state)) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ // Pointer-to-member access expressions. This parses the same as a binary
+ // operator, but it's implemented separately because "ds" shouldn't be
+ // accepted in other contexts that parse an operator name.
+ if (ParseTwoCharToken(state, "ds") && ParseExpression(state) &&
ParseExpression(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseOperatorName(state) &&
- ParseExpression(state) &&
- ParseExpression(state)) {
+ // Parameter pack expansion
+ if (ParseTwoCharToken(state, "sp") && ParseExpression(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
- if (ParseOperatorName(state) &&
- ParseExpression(state)) {
- return true;
- }
- *state = copy;
-
- if (ParseTwoChar(state, "st") && ParseType(state)) {
- return true;
- }
- *state = copy;
-
- if (ParseTwoChar(state, "sr") && ParseType(state) &&
- ParseUnqualifiedName(state) && ParseTemplateArgs(state)) {
- return true;
- }
- *state = copy;
-
- if (ParseTwoChar(state, "sr") && ParseType(state) &&
- ParseUnqualifiedName(state)) {
- return true;
- }
- *state = copy;
- return false;
+ return ParseUnresolvedName(state);
}
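
The operator branch above uses the short-circuit chain (arity < 3 || ParseExpression(state)) && (arity < 2 || ...) && (arity < 1 || ...) to consume exactly `arity` operands through a single code path. A small, hypothetical self-check (not part of the imported code) that demonstrates the counting behaviour of that chain:

#include <cassert>

static int g_operands_parsed = 0;

static bool ParseOperand() {  // stand-in for ParseExpression
  ++g_operands_parsed;
  return true;
}

static bool ParseOperatorExpression(int arity) {
  return (arity < 3 || ParseOperand()) &&
         (arity < 2 || ParseOperand()) &&
         (arity < 1 || ParseOperand());
}

int main() {
  for (int arity = 1; arity <= 3; ++arity) {
    g_operands_parsed = 0;
    ParseOperatorExpression(arity);
    assert(g_operands_parsed == arity);  // exactly `arity` operands consumed
  }
  return 0;
}
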
// <expr-primary> ::= L <type> <(value) number> E
@@ -1103,106 +1890,259 @@
// ::= L <mangled-name> E
// // A bug in g++'s C++ ABI version 2 (-fabi-version=2).
// ::= LZ <encoding> E
+//
+// Warning, subtle: the "bug" LZ production above is ambiguous with the first
+// production where <type> starts with <local-name>, which can lead to
+// exponential backtracking in two scenarios:
+//
+// - When whatever follows the E in the <local-name> in the first production is
+// not a name, we backtrack the whole <encoding> and re-parse the whole thing.
+//
+// - When whatever follows the <local-name> in the first production is not a
+// number and this <expr-primary> may be followed by a name, we backtrack the
+// <name> and re-parse it.
+//
+// Moreover this ambiguity isn't always resolved -- for example, the following
+// has two different parses:
+//
+// _ZaaILZ4aoeuE1x1EvE
+// => operator&&<aoeu, x, E, void>
+// => operator&&<(aoeu::x)(1), void>
+//
+// To resolve this, we just do what GCC's demangler does, and refuse to parse
+// casts to <local-name> types.
static bool ParseExprPrimary(State *state) {
- State copy = *state;
- if (ParseChar(state, 'L') && ParseType(state) && ParseNumber(state) &&
- ParseChar(state, 'E')) {
- return true;
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
}
- *state = copy;
+ ParseState copy = state->parse_state;
- if (ParseChar(state, 'L') && ParseType(state) && ParseFloatNumber(state) &&
- ParseChar(state, 'E')) {
- return true;
- }
- *state = copy;
+ // The "LZ" special case: if we see LZ, we commit to accept "LZ <encoding> E"
+ // or fail, no backtracking.
+ if (ParseTwoCharToken(state, "LZ")) {
+ if (ParseEncoding(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
- if (ParseChar(state, 'L') && ParseMangledName(state) &&
- ParseChar(state, 'E')) {
- return true;
+ state->parse_state = copy;
+ return false;
}
- *state = copy;
- if (ParseTwoChar(state, "LZ") && ParseEncoding(state) &&
- ParseChar(state, 'E')) {
+ // The merged cast production.
+ if (ParseOneCharToken(state, 'L') && ParseType(state) &&
+ ParseExprCastValue(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'L') && ParseMangledName(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
return false;
}
-// <local-name> := Z <(function) encoding> E <(entity) name>
-// [<discriminator>]
-// := Z <(function) encoding> E s [<discriminator>]
-static bool ParseLocalName(State *state) {
- State copy = *state;
- if (ParseChar(state, 'Z') && ParseEncoding(state) && ParseChar(state, 'E') &&
- MaybeAppend(state, "::") && ParseName(state) &&
+// <number> or <float>, followed by 'E', as described above ParseExprPrimary.
+static bool ParseExprCastValue(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ // We have to be able to backtrack after accepting a number because we could
+ // have e.g. "7fffE", which will accept "7" as a number but then fail to find
+ // the 'E'.
+ ParseState copy = state->parse_state;
+ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ if (ParseFloatNumber(state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+ state->parse_state = copy;
+
+ return false;
+}
+
+// <local-name> ::= Z <(function) encoding> E <(entity) name> [<discriminator>]
+// ::= Z <(function) encoding> E s [<discriminator>]
+//
+// Parsing a common prefix of these two productions together avoids an
+// exponential blowup of backtracking. Parse like:
+// <local-name> := Z <encoding> E <local-name-suffix>
+// <local-name-suffix> ::= s [<discriminator>]
+// ::= <name> [<discriminator>]
+
+static bool ParseLocalNameSuffix(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+
+ if (MaybeAppend(state, "::") && ParseName(state) &&
Optional(ParseDiscriminator(state))) {
return true;
}
- *state = copy;
- if (ParseChar(state, 'Z') && ParseEncoding(state) &&
- ParseTwoChar(state, "Es") && Optional(ParseDiscriminator(state))) {
+ // Since we're not going to overwrite the above "::" by re-parsing the
+ // <encoding> (whose trailing '\0' byte was in the byte now holding the
+ // first ':'), we have to roll back the "::" if the <name> parse failed.
+ if (state->parse_state.append) {
+ state->out[state->parse_state.out_cur_idx - 2] = '\0';
+ }
+
+ return ParseOneCharToken(state, 's') && Optional(ParseDiscriminator(state));
+}
+
+static bool ParseLocalName(State* state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
+ ParseOneCharToken(state, 'E') && ParseLocalNameSuffix(state)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <discriminator> := _ <(non-negative) number>
static bool ParseDiscriminator(State *state) {
- State copy = *state;
- if (ParseChar(state, '_') && ParseNumber(state)) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
return true;
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
// <substitution> ::= S_
// ::= S <seq-id> _
// ::= St, etc.
-static bool ParseSubstitution(State *state) {
- if (ParseTwoChar(state, "S_")) {
+//
+// "St" is special in that it's not valid as a standalone name, and it *is*
+// allowed to precede a name without being wrapped in "N...E". This means that
+// if we accept it on its own, we can accept "St1a" and try to parse
+// template-args, then fail and backtrack, accept "St" on its own, then "1a" as
+// an unqualified name and re-parse the same template-args. To block this
+// exponential backtracking, we disable it with 'accept_std=false' in
+// problematic contexts.
+static bool ParseSubstitution(State* state, bool accept_std) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ if (ParseTwoCharToken(state, "S_")) {
MaybeAppend(state, "?"); // We don't support substitutions.
return true;
}
- State copy = *state;
- if (ParseChar(state, 'S') && ParseSeqId(state) && ParseChar(state, '_')) {
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'S') && ParseSeqId(state) &&
+ ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?"); // We don't support substitutions.
return true;
}
- *state = copy;
+ state->parse_state = copy;
// Expand abbreviations like "St" => "std".
- if (ParseChar(state, 'S')) {
+ if (ParseOneCharToken(state, 'S')) {
const AbbrevPair *p;
- for (p = kSubstitutionList; p->abbrev != NULL; ++p) {
- if (state->mangled_cur[0] == p->abbrev[1]) {
+ for (p = kSubstitutionList; p->abbrev != nullptr; ++p) {
+ if (RemainingInput(state)[0] == p->abbrev[1] &&
+ (accept_std || p->abbrev[1] != 't')) {
MaybeAppend(state, "std");
if (p->real_name[0] != '\0') {
MaybeAppend(state, "::");
MaybeAppend(state, p->real_name);
}
- state->mangled_cur += 1;
+ ++state->parse_state.mangled_idx;
return true;
}
}
}
- *state = copy;
+ state->parse_state = copy;
return false;
}
+// Parse <mangled-name>, optionally followed by either a function-clone suffix
+// or version suffix. Returns true only if all of the mangled input was consumed.
+static bool ParseTopLevelMangledName(State *state) {
+ ComplexityGuard guard(state);
+ if (guard.IsTooComplex()) {
+ return false;
+ }
+ if (ParseMangledName(state)) {
+ if (RemainingInput(state)[0] != '\0') {
+ // Drop trailing function clone suffix, if any.
+ if (IsFunctionCloneSuffix(RemainingInput(state))) {
+ return true;
+ }
+ // Append trailing version suffix if any.
+ // ex. _Z3foo@@GLIBCXX_3.4
+ if (RemainingInput(state)[0] == '@') {
+ MaybeAppend(state, RemainingInput(state));
+ return true;
+ }
+ return false; // Unconsumed suffix.
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool Overflowed(const State* state) {
+ return state->parse_state.out_cur_idx >= state->out_end_idx;
+}
+#endif
+
// The demangler entry point.
-bool Demangle(const char *mangled, char *out, int out_size) {
+bool Demangle(const char* mangled, char* out, size_t out_size) {
+#if defined(GLOG_OS_WINDOWS)
+#if defined(HAVE_DBGHELP)
+ // When built with incremental linking, the Windows debugger
+ // library provides a more complicated `Symbol->Name` with the
+ // Incremental Linking Table offset, which looks like
+ // `@ILT+1105(?func@Foo@@SAXH@Z)`. However, the demangler expects
+ // only the mangled symbol, `?func@Foo@@SAXH@Z`. Fortunately, the
+ // mangled symbol is guaranteed not to have parentheses,
+ // so we search for `(` and extract up to `)`.
+ //
+ // Since we may be in a signal handler here, we cannot use `std::string`.
+ char buffer[1024]; // Big enough for a sane symbol.
+ const char *lparen = strchr(mangled, '(');
+ if (lparen) {
+ // Extract the string `(?...)`
+ const char *rparen = strchr(lparen, ')');
+ size_t length = static_cast<size_t>(rparen - lparen) - 1;
+ strncpy(buffer, lparen + 1, length);
+ buffer[length] = '\0';
+ mangled = buffer;
+ } // Else the symbol wasn't inside a set of parentheses
+ // We use the ANSI version to ensure the string type is always `char *`.
+ return UnDecorateSymbolName(mangled, out, out_size, UNDNAME_COMPLETE);
+#else
+ (void)mangled;
+ (void)out;
+ (void)out_size;
+ return false;
+#endif
+#else
State state;
InitState(&state, mangled, out, out_size);
- return (ParseMangledName(&state) && state.overflowed == false &&
- RemainingLength(&state) == 0);
+ return ParseTopLevelMangledName(&state) && !Overflowed(&state) &&
+ state.parse_state.out_cur_idx > 0;
+#endif
}
_END_GOOGLE_NAMESPACE_
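
For reference, a minimal caller-side sketch of the updated entry point, whose buffer size is now a size_t. This is not part of the import; it assumes the GOOGLE_NAMESPACE macros expand to namespace google as in upstream glog, and the expected output follows from the grammar handled above.

#include <cstdio>

#include "base/third_party/symbolize/demangle.h"

int main() {
  const char* mangled = "_Z3foov";  // Itanium-mangled form of foo()
  char demangled[256];
  if (google::Demangle(mangled, demangled, sizeof(demangled))) {
    std::printf("%s -> %s\n", mangled, demangled);  // expected: _Z3foov -> foo()
  } else {
    std::printf("could not demangle %s\n", mangled);
  }
  return 0;
}
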
diff --git a/base/third_party/symbolize/demangle.h b/base/third_party/symbolize/demangle.h
index 9c75915..620019f 100644
--- a/base/third_party/symbolize/demangle.h
+++ b/base/third_party/symbolize/demangle.h
@@ -70,14 +70,21 @@
#ifndef BASE_DEMANGLE_H_
#define BASE_DEMANGLE_H_
+#include <stddef.h>
+
#include "config.h"
+#include "glog/logging.h"
_START_GOOGLE_NAMESPACE_
+#if defined(STARBOARD)
+#define GLOG_EXPORT
+#endif
+
// Demangle "mangled". On success, return true and write the
// demangled symbol name to "out". Otherwise, return false.
// "out" is modified even if demangling is unsuccessful.
-bool Demangle(const char *mangled, char *out, int out_size);
+bool GLOG_EXPORT Demangle(const char* mangled, char* out, size_t out_size);
_END_GOOGLE_NAMESPACE_
diff --git a/base/third_party/symbolize/glog/logging.h b/base/third_party/symbolize/glog/logging.h
index a42c306..b935e3e 100644
--- a/base/third_party/symbolize/glog/logging.h
+++ b/base/third_party/symbolize/glog/logging.h
@@ -1,5 +1,41 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+// Copyright (c) 2022, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Ray Sidney
+//
+// This file contains #include information about logging-related stuff.
+// Pretty much everybody needs to #include this file so that they can
+// log various happenings.
+//
+#ifndef GLOG_LOGGING_H
+#define GLOG_LOGGING_H
-// Empty.
+// Not needed in Chrome.
+
+#endif // GLOG_LOGGING_H
diff --git a/base/third_party/symbolize/glog/raw_logging.h b/base/third_party/symbolize/glog/raw_logging.h
index f5515c4..74619b9 100644
--- a/base/third_party/symbolize/glog/raw_logging.h
+++ b/base/third_party/symbolize/glog/raw_logging.h
@@ -1,6 +1,41 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Maxim Lifantsev
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation and synchronization code.
-#define WARNING 1;
-#define RAW_LOG(severity, ...); // Do nothing.
+#ifndef GLOG_RAW_LOGGING_H
+#define GLOG_RAW_LOGGING_H
+
+#define RAW_LOG(...) // Do nothing.
+
+#endif // GLOG_RAW_LOGGING_H
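
One hazard the new stub avoids: the replaced definition (#define RAW_LOG(severity, ...); // Do nothing.) carried a stray semicolon in its replacement text, so every call expanded to two statements and broke brace-less if/else chains; the variadic no-op form expands to nothing. A small, hypothetical illustration (not part of the import; the WARNING/INFO constants below are defined locally only so the snippet compiles):

#define RAW_LOG(...)  // Do nothing.
#define WARNING 1
#define INFO 0

void Example(bool failed) {
  if (failed)
    RAW_LOG(WARNING, "something went wrong");  // expands to a single ';'
  else
    RAW_LOG(INFO, "all good");
  // With the old definition the call above expanded to ';;', leaving the
  // 'else' without a matching 'if'.
}
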
diff --git a/base/third_party/symbolize/patches/001-fix-up-includes.patch b/base/third_party/symbolize/patches/001-fix-up-includes.patch
new file mode 100644
index 0000000..3c6b828
--- /dev/null
+++ b/base/third_party/symbolize/patches/001-fix-up-includes.patch
@@ -0,0 +1,56 @@
+diff --git a/base/third_party/symbolize/demangle.h b/base/third_party/symbolize/demangle.h
+index f347b98148fb1..416f7ee153560 100644
+--- a/base/third_party/symbolize/demangle.h
++++ b/base/third_party/symbolize/demangle.h
+@@ -71,7 +71,7 @@
+ #define BASE_DEMANGLE_H_
+
+ #include "config.h"
+-#include <glog/logging.h>
++#include "glog/logging.h"
+
+ _START_GOOGLE_NAMESPACE_
+
+diff --git a/base/third_party/symbolize/symbolize.cc b/base/third_party/symbolize/symbolize.cc
+index f56e97c99332a..2cfd4c490cc94 100644
+--- a/base/third_party/symbolize/symbolize.cc
++++ b/base/third_party/symbolize/symbolize.cc
+@@ -132,7 +132,7 @@ _END_GOOGLE_NAMESPACE_
+
+ #include "symbolize.h"
+ #include "config.h"
+-#include <glog/raw_logging.h>
++#include "glog/raw_logging.h"
+
+ // Re-runs fn until it doesn't cause EINTR.
+ #define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
+diff --git a/base/third_party/symbolize/symbolize.h b/base/third_party/symbolize/symbolize.h
+index dcbb194c4b37e..5959e579ffc93 100644
+--- a/base/third_party/symbolize/symbolize.h
++++ b/base/third_party/symbolize/symbolize.h
+@@ -56,7 +56,7 @@
+
+ #include "utilities.h"
+ #include "config.h"
+-#include <glog/logging.h>
++#include "glog/logging.h"
+
+ #ifdef HAVE_SYMBOLIZE
+
+diff --git a/base/third_party/symbolize/utilities.h b/base/third_party/symbolize/utilities.h
+index 760c142c09e18..efa3a8d99856e 100644
+--- a/base/third_party/symbolize/utilities.h
++++ b/base/third_party/symbolize/utilities.h
+@@ -52,11 +52,9 @@
+ #define PRIXS __PRIS_PREFIX "X"
+ #define PRIoS __PRIS_PREFIX "o"
+
+-#include "base/mutex.h" // This must go first so we get _XOPEN_SOURCE
+-
+ #include <string>
+
+-#include <glog/logging.h>
++#include "glog/logging.h"
+
+ #if defined(GLOG_OS_WINDOWS)
+ # include "port.h"
diff --git a/base/third_party/symbolize/patches/002-minimal-logging.patch b/base/third_party/symbolize/patches/002-minimal-logging.patch
new file mode 100644
index 0000000..2c37a43
--- /dev/null
+++ b/base/third_party/symbolize/patches/002-minimal-logging.patch
@@ -0,0 +1,2135 @@
+diff --git a/base/third_party/symbolize/glog/logging.h b/base/third_party/symbolize/glog/logging.h
+index 3c9253d829d7f..46869226024da 100644
+--- a/base/third_party/symbolize/glog/logging.h
++++ b/base/third_party/symbolize/glog/logging.h
+@@ -36,1979 +36,6 @@
+ #ifndef GLOG_LOGGING_H
+ #define GLOG_LOGGING_H
+
+-#if 1 && __cplusplus >= 201103L
+-#include <chrono>
+-#endif
+-
+-#include <cerrno>
+-#include <cstddef>
+-#include <cstdlib>
+-#include <cstring>
+-#include <ctime>
+-#include <iosfwd>
+-#include <ostream>
+-#include <sstream>
+-#include <string>
+-#if 1
+-# include <unistd.h>
+-#endif
+-#include <vector>
+-
+-#if defined(_MSC_VER)
+-#define GLOG_MSVC_PUSH_DISABLE_WARNING(n) __pragma(warning(push)) \
+- __pragma(warning(disable:n))
+-#define GLOG_MSVC_POP_WARNING() __pragma(warning(pop))
+-#else
+-#define GLOG_MSVC_PUSH_DISABLE_WARNING(n)
+-#define GLOG_MSVC_POP_WARNING()
+-#endif
+-
+-#include <glog/platform.h>
+-
+-#if 1
+-#include <glog/export.h>
+-#endif
+-
+-// We care a lot about number of bits things take up. Unfortunately,
+-// systems define their bit-specific ints in a lot of different ways.
+-// We use our own way, and have a typedef to get there.
+-// Note: these commands below may look like "#if 1" or "#if 0", but
+-// that's because they were constructed that way at ./configure time.
+-// Look at logging.h.in to see how they're calculated (based on your config).
+-#if 1
+-#include <stdint.h> // the normal place uint16_t is defined
+-#endif
+-#if 1
+-#include <sys/types.h> // the normal place u_int16_t is defined
+-#endif
+-#if 1
+-#include <inttypes.h> // a third place for uint16_t or u_int16_t
+-#endif
+-
+-#if 0
+-#include <gflags/gflags.h>
+-#endif
+-
+-#if 1 && __cplusplus >= 201103L
+-#include <atomic>
+-#elif defined(GLOG_OS_WINDOWS)
+-#include <Windows.h>
+-#endif
+-
+-namespace google {
+-
+-#if 1 // the C99 format
+-typedef int32_t int32;
+-typedef uint32_t uint32;
+-typedef int64_t int64;
+-typedef uint64_t uint64;
+-#elif 1 // the BSD format
+-typedef int32_t int32;
+-typedef u_int32_t uint32;
+-typedef int64_t int64;
+-typedef u_int64_t uint64;
+-#elif 0 // the windows (vc7) format
+-typedef __int32 int32;
+-typedef unsigned __int32 uint32;
+-typedef __int64 int64;
+-typedef unsigned __int64 uint64;
+-#else
+-#error Do not know how to define a 32-bit integer quantity on your system
+-#endif
+-
+-#if !(1)
+-typedef ptrdiff_t ssize_t;
+-#endif
+-
+-#if !(1)
+-typedef int mode_t;
+-#endif
+-
+-typedef double WallTime;
+-
+-struct GLOG_EXPORT LogMessageTime {
+- LogMessageTime();
+- LogMessageTime(std::tm t);
+- LogMessageTime(std::time_t timestamp, WallTime now);
+-
+- const time_t& timestamp() const { return timestamp_; }
+- const int& sec() const { return time_struct_.tm_sec; }
+- const int32_t& usec() const { return usecs_; }
+- const int&(min)() const { return time_struct_.tm_min; }
+- const int& hour() const { return time_struct_.tm_hour; }
+- const int& day() const { return time_struct_.tm_mday; }
+- const int& month() const { return time_struct_.tm_mon; }
+- const int& year() const { return time_struct_.tm_year; }
+- const int& dayOfWeek() const { return time_struct_.tm_wday; }
+- const int& dayInYear() const { return time_struct_.tm_yday; }
+- const int& dst() const { return time_struct_.tm_isdst; }
+- const long int& gmtoff() const { return gmtoffset_; }
+- const std::tm& tm() const { return time_struct_; }
+-
+- private:
+- void init(const std::tm& t, std::time_t timestamp, WallTime now);
+- std::tm time_struct_; // Time of creation of LogMessage
+- time_t timestamp_; // Time of creation of LogMessage in seconds
+- int32_t usecs_; // Time of creation of LogMessage - microseconds part
+- long int gmtoffset_;
+-
+- void CalcGmtOffset();
+-};
+-
+-#ifdef GLOG_CUSTOM_PREFIX_SUPPORT
+-struct LogMessageInfo {
+- explicit LogMessageInfo(const char* const severity_,
+- const char* const filename_,
+- const int& line_number_,
+- const int& thread_id_,
+- const LogMessageTime& time_):
+- severity(severity_), filename(filename_), line_number(line_number_),
+- thread_id(thread_id_), time(time_)
+- {}
+-
+- const char* const severity;
+- const char* const filename;
+- const int &line_number;
+- const int &thread_id;
+- const LogMessageTime& time;
+-};
+-
+-typedef void(*CustomPrefixCallback)(std::ostream& s, const LogMessageInfo& l, void* data);
+-
+-#endif
+-
+-}
+-
+-
+-// The global value of GOOGLE_STRIP_LOG. All the messages logged to
+-// LOG(XXX) with severity less than GOOGLE_STRIP_LOG will not be displayed.
+-// If it can be determined at compile time that the message will not be
+-// printed, the statement will be compiled out.
+-//
+-// Example: to strip out all INFO and WARNING messages, use the value
+-// of 2 below. To make an exception for WARNING messages from a single
+-// file, add "#define GOOGLE_STRIP_LOG 1" to that file _before_ including
+-// base/logging.h
+-#ifndef GOOGLE_STRIP_LOG
+-#define GOOGLE_STRIP_LOG 0
+-#endif
+-
+-// GCC can be told that a certain branch is not likely to be taken (for
+-// instance, a CHECK failure), and use that information in static analysis.
+-// Giving it this information can help it optimize for the common case in
+-// the absence of better information (ie. -fprofile-arcs).
+-//
+-#ifndef GOOGLE_PREDICT_BRANCH_NOT_TAKEN
+-#if 1
+-#define GOOGLE_PREDICT_BRANCH_NOT_TAKEN(x) (__builtin_expect(x, 0))
+-#else
+-#define GOOGLE_PREDICT_BRANCH_NOT_TAKEN(x) x
+-#endif
+-#endif
+-
+-#ifndef GOOGLE_PREDICT_FALSE
+-#if 1
+-#define GOOGLE_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+-#else
+-#define GOOGLE_PREDICT_FALSE(x) x
+-#endif
+-#endif
+-
+-#ifndef GOOGLE_PREDICT_TRUE
+-#if 1
+-#define GOOGLE_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+-#else
+-#define GOOGLE_PREDICT_TRUE(x) x
+-#endif
+-#endif
+-
+-
+-// Make a bunch of macros for logging. The way to log things is to stream
+-// things to LOG(<a particular severity level>). E.g.,
+-//
+-// LOG(INFO) << "Found " << num_cookies << " cookies";
+-//
+-// You can capture log messages in a string, rather than reporting them
+-// immediately:
+-//
+-// vector<string> errors;
+-// LOG_STRING(ERROR, &errors) << "Couldn't parse cookie #" << cookie_num;
+-//
+-// This pushes back the new error onto 'errors'; if given a NULL pointer,
+-// it reports the error via LOG(ERROR).
+-//
+-// You can also do conditional logging:
+-//
+-// LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+-//
+-// You can also do occasional logging (log every n'th occurrence of an
+-// event):
+-//
+-// LOG_EVERY_N(INFO, 10) << "Got the " << google::COUNTER << "th cookie";
+-//
+-// The above will cause log messages to be output on the 1st, 11th, 21st, ...
+-// times it is executed. Note that the special google::COUNTER value is used
+-// to identify which repetition is happening.
+-//
+-// You can also do occasional conditional logging (log every n'th
+-// occurrence of an event, when condition is satisfied):
+-//
+-// LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << google::COUNTER
+-// << "th big cookie";
+-//
+-// You can log messages the first N times your code executes a line. E.g.
+-//
+-// LOG_FIRST_N(INFO, 20) << "Got the " << google::COUNTER << "th cookie";
+-//
+-// Outputs log messages for the first 20 times it is executed.
+-//
+-// Analogous SYSLOG, SYSLOG_IF, and SYSLOG_EVERY_N macros are available.
+-// These log to syslog as well as to the normal logs. If you use these at
+-// all, you need to be aware that syslog can drastically reduce performance,
+-// especially if it is configured for remote logging! Don't use these
+-// unless you fully understand this and have a concrete need to use them.
+-// Even then, try to minimize your use of them.
+-//
+-// There are also "debug mode" logging macros like the ones above:
+-//
+-// DLOG(INFO) << "Found cookies";
+-//
+-// DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+-//
+-// DLOG_EVERY_N(INFO, 10) << "Got the " << google::COUNTER << "th cookie";
+-//
+-// All "debug mode" logging is compiled away to nothing for non-debug mode
+-// compiles.
+-//
+-// We also have
+-//
+-// LOG_ASSERT(assertion);
+-// DLOG_ASSERT(assertion);
+-//
+-// which is syntactic sugar for {,D}LOG_IF(FATAL, assert fails) << assertion;
+-//
+-// There are "verbose level" logging macros. They look like
+-//
+-// VLOG(1) << "I'm printed when you run the program with --v=1 or more";
+-// VLOG(2) << "I'm printed when you run the program with --v=2 or more";
+-//
+-// These always log at the INFO log level (when they log at all).
+-// The verbose logging can also be turned on module-by-module. For instance,
+-// --vmodule=mapreduce=2,file=1,gfs*=3 --v=0
+-// will cause:
+-// a. VLOG(2) and lower messages to be printed from mapreduce.{h,cc}
+-// b. VLOG(1) and lower messages to be printed from file.{h,cc}
+-// c. VLOG(3) and lower messages to be printed from files prefixed with "gfs"
+-// d. VLOG(0) and lower messages to be printed from elsewhere
+-//
+-// The wildcarding functionality shown by (c) supports both '*' (match
+-// 0 or more characters) and '?' (match any single character) wildcards.
+-//
+-// There's also VLOG_IS_ON(n) "verbose level" condition macro. To be used as
+-//
+-// if (VLOG_IS_ON(2)) {
+-// // do some logging preparation and logging
+-// // that can't be accomplished with just VLOG(2) << ...;
+-// }
+-//
+-// There are also VLOG_IF, VLOG_EVERY_N and VLOG_IF_EVERY_N "verbose level"
+-// condition macros for the simple cases, where no extra computation or
+-// preparation for logging is needed:
+-// VLOG_IF(1, (size > 1024))
+-// << "I'm printed when size is more than 1024 and when you run the "
+-// "program with --v=1 or more";
+-// VLOG_EVERY_N(1, 10)
+-// << "I'm printed every 10th occurrence, and when you run the program "
+-// "with --v=1 or more. Present occurrence is " << google::COUNTER;
+-// VLOG_IF_EVERY_N(1, (size > 1024), 10)
+-// << "I'm printed on every 10th occurrence of the case when size is more "
+-// "than 1024, when you run the program with --v=1 or more. "
+-// "Present occurrence is " << google::COUNTER;
+-//
+-// The supported severity levels for macros that allow you to specify one
+-// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
+-// Note that messages of a given severity are logged not only in the
+-// logfile for that severity, but also in all logfiles of lower severity.
+-// E.g., a message of severity FATAL will be logged to the logfiles of
+-// severity FATAL, ERROR, WARNING, and INFO.
+-//
+-// There is also the special severity of DFATAL, which logs FATAL in
+-// debug mode, ERROR in normal mode.
+-//
+-// Very important: logging a message at the FATAL severity level causes
+-// the program to terminate (after the message is logged).
+-//
+-// Unless otherwise specified, logs will be written to the filename
+-// "<program name>.<hostname>.<user name>.log.<severity level>.", followed
+-// by the date, time, and pid (you can't prevent the date, time, and pid
+-// from being in the filename).
+-//
+-// The logging code takes two flags:
+-// --v=# set the verbose level
+-// --logtostderr log all the messages to stderr instead of to logfiles
+-
+-// LOG LINE PREFIX FORMAT
+-//
+-// Log lines have this form:
+-//
+-// Lyyyymmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+-//
+-// where the fields are defined as follows:
+-//
+-// L A single character, representing the log level
+-// (e.g. 'I' for INFO)
+-// yyyy The year
+-// mm The month (zero padded; i.e. May is '05')
+-// dd The day (zero padded)
+-// hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+-// threadid The space-padded thread ID as returned by GetTID()
+-// (this matches the PID on Linux)
+-// file The file name
+-// line The line number
+-// msg The user-supplied message
+-//
+-// Example:
+-//
+-// I1103 11:57:31.739339 24395 google.cc:2341] Command line: ./some_prog
+-// I1103 11:57:31.739403 24395 google.cc:2342] Process id 24395
+-//
+-// NOTE: although the microseconds are useful for comparing events on
+-// a single machine, clocks on different machines may not be well
+-// synchronized. Hence, use caution when comparing the low bits of
+-// timestamps from different machines.
+-
+-#pragma push_macro("DECLARE_VARIABLE")
+-#pragma push_macro("DECLARE_bool")
+-#pragma push_macro("DECLARE_string")
+-#pragma push_macro("DECLARE_int32")
+-#pragma push_macro("DECLARE_uint32")
+-
+-#ifdef DECLARE_VARIABLE
+-#undef DECLARE_VARIABLE
+-#endif
+-
+-#ifdef DECLARE_bool
+-#undef DECLARE_bool
+-#endif
+-
+-#ifdef DECLARE_string
+-#undef DECLARE_string
+-#endif
+-
+-#ifdef DECLARE_int32
+-#undef DECLARE_int32
+-#endif
+-
+-#ifdef DECLARE_uint32
+-#undef DECLARE_uint32
+-#endif
+-
+-#ifndef DECLARE_VARIABLE
+-#define DECLARE_VARIABLE(type, shorttype, name, tn) \
+- namespace fL##shorttype { \
+- extern GLOG_EXPORT type FLAGS_##name; \
+- } \
+- using fL##shorttype::FLAGS_##name
+-
+-// bool specialization
+-#define DECLARE_bool(name) \
+- DECLARE_VARIABLE(bool, B, name, bool)
+-
+-// int32 specialization
+-#define DECLARE_int32(name) \
+- DECLARE_VARIABLE(google::int32, I, name, int32)
+-
+-#if !defined(DECLARE_uint32)
+-// uint32 specialization
+-#define DECLARE_uint32(name) \
+- DECLARE_VARIABLE(google::uint32, U, name, uint32)
+-#endif // !defined(DECLARE_uint32) && !(0)
+-
+-// Special case for string, because we have to specify the namespace
+-// std::string, which doesn't play nicely with our FLAG__namespace hackery.
+-#define DECLARE_string(name) \
+- namespace fLS { \
+- extern GLOG_EXPORT std::string& FLAGS_##name; \
+- } \
+- using fLS::FLAGS_##name
+-#endif
+-
+-// Set whether to append a timestamp to the log file name
+-DECLARE_bool(timestamp_in_logfile_name);
+-
+-// Set whether log messages go to stdout instead of logfiles
+-DECLARE_bool(logtostdout);
+-
+-// Use colored messages when logging to stdout (if supported by terminal).
+-DECLARE_bool(colorlogtostdout);
+-
+-// Set whether log messages go to stderr instead of logfiles
+-DECLARE_bool(logtostderr);
+-
+-// Set whether log messages go to stderr in addition to logfiles.
+-DECLARE_bool(alsologtostderr);
+-
+-// Use colored messages when logging to stderr (if supported by terminal).
+-DECLARE_bool(colorlogtostderr);
+-
+-// Log messages at a level >= this flag are automatically sent to
+-// stderr in addition to log files.
+-DECLARE_int32(stderrthreshold);
+-
+-// Set whether the log file header should be written upon creating a file.
+-DECLARE_bool(log_file_header);
+-
+-// Set whether the log prefix should be prepended to each line of output.
+-DECLARE_bool(log_prefix);
+-
+-// Set whether the year should be included in the log prefix.
+-DECLARE_bool(log_year_in_prefix);
+-
+-// Log messages at a level <= this flag are buffered.
+-// Log messages at a higher level are flushed immediately.
+-DECLARE_int32(logbuflevel);
+-
+-// Sets the maximum number of seconds which logs may be buffered for.
+-DECLARE_int32(logbufsecs);
+-
+-// Log suppression level: messages logged at a lower level than this
+-// are suppressed.
+-DECLARE_int32(minloglevel);
+-
+-// If specified, logfiles are written into this directory instead of the
+-// default logging directory.
+-DECLARE_string(log_dir);
+-
+-// Set the log file mode.
+-DECLARE_int32(logfile_mode);
+-
+-// Sets the path of the directory into which to put additional links
+-// to the log files.
+-DECLARE_string(log_link);
+-
+-DECLARE_int32(v); // in vlog_is_on.cc
+-
+-DECLARE_string(vmodule); // also in vlog_is_on.cc
+-
+-// Sets the maximum log file size (in MB).
+-DECLARE_uint32(max_log_size);
+-
+-// Sets whether to avoid logging to the disk if the disk is full.
+-DECLARE_bool(stop_logging_if_full_disk);
+-
+-// Use UTC time for logging
+-DECLARE_bool(log_utc_time);
+-
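+-// The FLAGS_* variables declared above are ordinary globals, so they can
+-// also be set from code, typically before calling InitGoogleLogging().
+-// A minimal sketch; the chosen values and the "/tmp/myapp_logs" path are
+-// placeholders:
+-//
+-//   FLAGS_logtostderr = true;                  // log to stderr, not files
+-//   FLAGS_minloglevel = google::GLOG_WARNING;  // suppress INFO messages
+-//   FLAGS_v = 1;                               // enable VLOG(1)
+-//   FLAGS_log_dir = "/tmp/myapp_logs";         // used when logging to files
+-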
+-// Log messages below the GOOGLE_STRIP_LOG level will be compiled away for
+-// security reasons. See LOG(severity) below.
+-
+-// A few definitions of macros that don't generate much code. Since
+-// LOG(INFO) and its ilk are used all over our code, it's
+-// better to have compact code for these operations.
+-
+-#if GOOGLE_STRIP_LOG == 0
+-#define COMPACT_GOOGLE_LOG_INFO google::LogMessage( \
+- __FILE__, __LINE__)
+-#define LOG_TO_STRING_INFO(message) google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_INFO, message)
+-#else
+-#define COMPACT_GOOGLE_LOG_INFO google::NullStream()
+-#define LOG_TO_STRING_INFO(message) google::NullStream()
+-#endif
+-
+-#if GOOGLE_STRIP_LOG <= 1
+-#define COMPACT_GOOGLE_LOG_WARNING google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_WARNING)
+-#define LOG_TO_STRING_WARNING(message) google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_WARNING, message)
+-#else
+-#define COMPACT_GOOGLE_LOG_WARNING google::NullStream()
+-#define LOG_TO_STRING_WARNING(message) google::NullStream()
+-#endif
+-
+-#if GOOGLE_STRIP_LOG <= 2
+-#define COMPACT_GOOGLE_LOG_ERROR google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ERROR)
+-#define LOG_TO_STRING_ERROR(message) google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ERROR, message)
+-#else
+-#define COMPACT_GOOGLE_LOG_ERROR google::NullStream()
+-#define LOG_TO_STRING_ERROR(message) google::NullStream()
+-#endif
+-
+-#if GOOGLE_STRIP_LOG <= 3
+-#define COMPACT_GOOGLE_LOG_FATAL google::LogMessageFatal( \
+- __FILE__, __LINE__)
+-#define LOG_TO_STRING_FATAL(message) google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_FATAL, message)
+-#else
+-#define COMPACT_GOOGLE_LOG_FATAL google::NullStreamFatal()
+-#define LOG_TO_STRING_FATAL(message) google::NullStreamFatal()
+-#endif
+-
+-#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+-#define DCHECK_IS_ON() 0
+-#else
+-#define DCHECK_IS_ON() 1
+-#endif
+-
+-// For DFATAL, we want to use LogMessage (as opposed to
+-// LogMessageFatal), to be consistent with the original behavior.
+-#if !DCHECK_IS_ON()
+-#define COMPACT_GOOGLE_LOG_DFATAL COMPACT_GOOGLE_LOG_ERROR
+-#elif GOOGLE_STRIP_LOG <= 3
+-#define COMPACT_GOOGLE_LOG_DFATAL google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_FATAL)
+-#else
+-#define COMPACT_GOOGLE_LOG_DFATAL google::NullStreamFatal()
+-#endif
+-
+-#define GOOGLE_LOG_INFO(counter) google::LogMessage(__FILE__, __LINE__, google::GLOG_INFO, counter, &google::LogMessage::SendToLog)
+-#define SYSLOG_INFO(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::GLOG_INFO, counter, \
+- &google::LogMessage::SendToSyslogAndLog)
+-#define GOOGLE_LOG_WARNING(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::GLOG_WARNING, counter, \
+- &google::LogMessage::SendToLog)
+-#define SYSLOG_WARNING(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::GLOG_WARNING, counter, \
+- &google::LogMessage::SendToSyslogAndLog)
+-#define GOOGLE_LOG_ERROR(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::GLOG_ERROR, counter, \
+- &google::LogMessage::SendToLog)
+-#define SYSLOG_ERROR(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::GLOG_ERROR, counter, \
+- &google::LogMessage::SendToSyslogAndLog)
+-#define GOOGLE_LOG_FATAL(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::GLOG_FATAL, counter, \
+- &google::LogMessage::SendToLog)
+-#define SYSLOG_FATAL(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::GLOG_FATAL, counter, \
+- &google::LogMessage::SendToSyslogAndLog)
+-#define GOOGLE_LOG_DFATAL(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::DFATAL_LEVEL, counter, \
+- &google::LogMessage::SendToLog)
+-#define SYSLOG_DFATAL(counter) \
+- google::LogMessage(__FILE__, __LINE__, google::DFATAL_LEVEL, counter, \
+- &google::LogMessage::SendToSyslogAndLog)
+-
+-#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
+-// A very useful logging macro to log Windows errors:
+-#define LOG_SYSRESULT(result) \
+- if (FAILED(HRESULT_FROM_WIN32(result))) { \
+- LPSTR message = NULL; \
+- LPSTR msg = reinterpret_cast<LPSTR>(&message); \
+- DWORD message_length = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | \
+- FORMAT_MESSAGE_FROM_SYSTEM | \
+- FORMAT_MESSAGE_IGNORE_INSERTS, \
+- 0, result, 0, msg, 100, NULL); \
+- if (message_length > 0) { \
+- google::LogMessage(__FILE__, __LINE__, google::GLOG_ERROR, 0, \
+- &google::LogMessage::SendToLog).stream() \
+- << reinterpret_cast<const char*>(message); \
+- LocalFree(message); \
+- } \
+- }
+-#endif
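+-
+-// A minimal sketch of its intended use (Windows only); the directory being
+-// created is just a placeholder:
+-//
+-//   if (!CreateDirectoryA("C:\\my_app_data", NULL)) {
+-//     LOG_SYSRESULT(GetLastError());
+-//   }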
+-
+-// We use the preprocessor's merging operator, "##", so that, e.g.,
+-// LOG(INFO) becomes the token GOOGLE_LOG_INFO. There's some funny
+-// subtle difference between ostream member streaming functions (e.g.,
+-// ostream::operator<<(int) and ostream non-member streaming functions
+-// (e.g., ::operator<<(ostream&, string&): it turns out that it's
+-// impossible to stream something like a string directly to an unnamed
+-// ostream. We employ a neat hack by calling the stream() member
+-// function of LogMessage which seems to avoid the problem.
+-#define LOG(severity) COMPACT_GOOGLE_LOG_ ## severity.stream()
+-#define SYSLOG(severity) SYSLOG_ ## severity(0).stream()
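+-
+-// For example, LOG(WARNING) expands to COMPACT_GOOGLE_LOG_WARNING.stream(),
+-// which (with GOOGLE_STRIP_LOG <= 1) is
+-//   google::LogMessage(__FILE__, __LINE__, google::GLOG_WARNING).stream()
+-// so everything streamed into it is emitted when the temporary LogMessage
+-// is destroyed at the end of the statement.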
+-
+-namespace google {
+-
+-// They need the definitions of integer types.
+-#include <glog/log_severity.h>
+-#include <glog/vlog_is_on.h>
+-
+-// Initialize google's logging library. You will see the program name
+-// specified by argv0 in log outputs.
+-GLOG_EXPORT void InitGoogleLogging(const char* argv0);
+-
+-#ifdef GLOG_CUSTOM_PREFIX_SUPPORT
+-GLOG_EXPORT void InitGoogleLogging(const char* argv0,
+- CustomPrefixCallback prefix_callback,
+- void* prefix_callback_data = NULL);
+-#endif
+-
+-// Check if google's logging library has been initialized.
+-GLOG_EXPORT bool IsGoogleLoggingInitialized();
+-
+-// Shutdown google's logging library.
+-GLOG_EXPORT void ShutdownGoogleLogging();
+-
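+-// A minimal usage sketch ("my_app started" is an arbitrary message):
+-//
+-//   int main(int argc, char* argv[]) {
+-//     google::InitGoogleLogging(argv[0]);
+-//     LOG(INFO) << "my_app started with " << argc - 1 << " arguments";
+-//     google::ShutdownGoogleLogging();
+-//     return 0;
+-//   }
+-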
+-#if defined(__GNUC__)
+-typedef void (*logging_fail_func_t)() __attribute__((noreturn));
+-#else
+-typedef void (*logging_fail_func_t)();
+-#endif
+-
+-// Install a function which will be called after LOG(FATAL).
+-GLOG_EXPORT void InstallFailureFunction(logging_fail_func_t fail_func);
+-
+-// Enable/Disable old log cleaner.
+-GLOG_EXPORT void EnableLogCleaner(unsigned int overdue_days);
+-GLOG_EXPORT void DisableLogCleaner();
+-GLOG_EXPORT void SetApplicationFingerprint(const std::string& fingerprint);
+-
+-class LogSink; // defined below
+-
+-// If a non-NULL sink pointer is given, we push this message to that sink.
+-// For LOG_TO_SINK we then do normal LOG(severity) logging as well.
+-// This is useful for capturing messages and passing/storing them
+-// somewhere more specific than the global log of the process.
+-// Argument types:
+-// LogSink* sink;
+-// LogSeverity severity;
+-// The cast is to disambiguate NULL arguments.
+-#define LOG_TO_SINK(sink, severity) \
+- google::LogMessage( \
+- __FILE__, __LINE__, \
+- google::GLOG_ ## severity, \
+- static_cast<google::LogSink*>(sink), true).stream()
+-#define LOG_TO_SINK_BUT_NOT_TO_LOGFILE(sink, severity) \
+- google::LogMessage( \
+- __FILE__, __LINE__, \
+- google::GLOG_ ## severity, \
+- static_cast<google::LogSink*>(sink), false).stream()
+-
+-// If a non-NULL string pointer is given, we write this message to that string.
+-// We then do normal LOG(severity) logging as well.
+-// This is useful for capturing messages and storing them somewhere more
+-// specific than the global log of the process.
+-// Argument types:
+-// string* message;
+-// LogSeverity severity;
+-// The cast is to disambiguate NULL arguments.
+-// NOTE: LOG(severity) expands to LogMessage().stream() for the specified
+-// severity.
+-#define LOG_TO_STRING(severity, message) \
+- LOG_TO_STRING_##severity(static_cast<std::string*>(message)).stream()
+-
+-// If a non-NULL pointer is given, we push the message onto the end
+-// of a vector of strings; otherwise, we report it with LOG(severity).
+-// This is handy for capturing messages and perhaps passing them back
+-// to the caller, rather than reporting them immediately.
+-// Argument types:
+-// LogSeverity severity;
+-// vector<string> *outvec;
+-// The cast is to disambiguate NULL arguments.
+-#define LOG_STRING(severity, outvec) \
+- LOG_TO_STRING_##severity(static_cast<std::vector<std::string>*>(outvec)).stream()
+-
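+-// A small sketch of both forms ('status' and 'warnings' are placeholders);
+-// passing NULL instead of a pointer makes each behave like a plain
+-// LOG(severity):
+-//
+-//   std::string status;
+-//   LOG_TO_STRING(ERROR, &status) << "Disk quota exceeded";
+-//
+-//   std::vector<std::string> warnings;
+-//   LOG_STRING(WARNING, &warnings) << "Retrying request";
+-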
+-#define LOG_IF(severity, condition) \
+- static_cast<void>(0), \
+- !(condition) ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+-#define SYSLOG_IF(severity, condition) \
+- static_cast<void>(0), \
+- !(condition) ? (void) 0 : google::LogMessageVoidify() & SYSLOG(severity)
+-
+-#define LOG_ASSERT(condition) \
+- LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition
+-#define SYSLOG_ASSERT(condition) \
+- SYSLOG_IF(FATAL, !(condition)) << "Assert failed: " #condition
+-
+-// CHECK dies with a fatal error if condition is not true. It is *not*
+-// controlled by DCHECK_IS_ON(), so the check will be executed regardless of
+-// compilation mode. Therefore, it is safe to do things like:
+-// CHECK(fp->Write(x) == 4)
+-#define CHECK(condition) \
+- LOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN(!(condition))) \
+- << "Check failed: " #condition " "
+-
+-// A container for a string pointer which can be evaluated to a bool -
+-// true iff the pointer is NULL.
+-struct CheckOpString {
+- CheckOpString(std::string* str) : str_(str) { }
+- // No destructor: if str_ is non-NULL, we're about to LOG(FATAL),
+- // so there's no point in cleaning up str_.
+- operator bool() const {
+- return GOOGLE_PREDICT_BRANCH_NOT_TAKEN(str_ != NULL);
+- }
+- std::string* str_;
+-};
+-
+-// Function is overloaded for integral types to allow static const
+-// integrals declared in classes and not defined to be used as arguments to
+-// CHECK* macros. It's not encouraged though.
+-template <class T>
+-inline const T& GetReferenceableValue(const T& t) { return t; }
+-inline char GetReferenceableValue(char t) { return t; }
+-inline unsigned char GetReferenceableValue(unsigned char t) { return t; }
+-inline signed char GetReferenceableValue(signed char t) { return t; }
+-inline short GetReferenceableValue(short t) { return t; }
+-inline unsigned short GetReferenceableValue(unsigned short t) { return t; }
+-inline int GetReferenceableValue(int t) { return t; }
+-inline unsigned int GetReferenceableValue(unsigned int t) { return t; }
+-inline long GetReferenceableValue(long t) { return t; }
+-inline unsigned long GetReferenceableValue(unsigned long t) { return t; }
+-#if __cplusplus >= 201103L
+-inline long long GetReferenceableValue(long long t) { return t; }
+-inline unsigned long long GetReferenceableValue(unsigned long long t) {
+- return t;
+-}
+-#endif
+-
+-// This is a dummy class to define the following operator.
+-struct DummyClassToDefineOperator {};
+-
+-}
+-
+-// Define global operator<< to declare using ::operator<<.
+-// This declaration will allow us to use CHECK macros for user
+-// defined classes which have operator<< (e.g., stl_logging.h).
+-inline std::ostream& operator<<(
+- std::ostream& out, const google::DummyClassToDefineOperator&) {
+- return out;
+-}
+-
+-namespace google {
+-
+-// This formats a value for a failing CHECK_XX statement. Ordinarily,
+-// it uses the definition for operator<<, with a few special cases below.
+-template <typename T>
+-inline void MakeCheckOpValueString(std::ostream* os, const T& v) {
+- (*os) << v;
+-}
+-
+-// Overrides for char types provide readable values for unprintable
+-// characters.
+-template <> GLOG_EXPORT
+-void MakeCheckOpValueString(std::ostream* os, const char& v);
+-template <> GLOG_EXPORT
+-void MakeCheckOpValueString(std::ostream* os, const signed char& v);
+-template <> GLOG_EXPORT
+-void MakeCheckOpValueString(std::ostream* os, const unsigned char& v);
+-
+-// This is required because nullptr is only present in C++11 and later.
+-#if 1 && __cplusplus >= 201103L
+-// Provide printable value for nullptr_t
+-template <> GLOG_EXPORT
+-void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& v);
+-#endif
+-
+-// Build the error message string. Specify no inlining for code size.
+-template <typename T1, typename T2>
+-std::string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext)
+- __attribute__((noinline));
+-
+-namespace base {
+-namespace internal {
+-
+-// If "s" is less than base_logging::INFO, returns base_logging::INFO.
+-// If "s" is greater than base_logging::FATAL, returns
+-// base_logging::ERROR. Otherwise, returns "s".
+-LogSeverity NormalizeSeverity(LogSeverity s);
+-
+-} // namespace internal
+-
+-// A helper class for formatting "expr (V1 vs. V2)" in a CHECK_XX
+-// statement. See MakeCheckOpString for sample usage. Other
+-// approaches were considered: use of a template method (e.g.,
+-// base::BuildCheckOpString(exprtext, base::Print<T1>, &v1,
+-// base::Print<T2>, &v2)); however, that approach has complications
+-// related to volatile arguments and function-pointer arguments.
+-class GLOG_EXPORT CheckOpMessageBuilder {
+- public:
+- // Inserts "exprtext" and " (" to the stream.
+- explicit CheckOpMessageBuilder(const char *exprtext);
+- // Deletes "stream_".
+- ~CheckOpMessageBuilder();
+- // For inserting the first variable.
+- std::ostream* ForVar1() { return stream_; }
+- // For inserting the second variable (adds an intermediate " vs. ").
+- std::ostream* ForVar2();
+- // Get the result (inserts the closing ")").
+- std::string* NewString();
+-
+- private:
+- std::ostringstream *stream_;
+-};
+-
+-} // namespace base
+-
+-template <typename T1, typename T2>
+-std::string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext) {
+- base::CheckOpMessageBuilder comb(exprtext);
+- MakeCheckOpValueString(comb.ForVar1(), v1);
+- MakeCheckOpValueString(comb.ForVar2(), v2);
+- return comb.NewString();
+-}
+-
+-// Helper functions for CHECK_OP macro.
+-// The (int, int) specialization works around the issue that the compiler
+-// will not instantiate the template version of the function on values of
+-// unnamed enum type - see comment below.
+-#define DEFINE_CHECK_OP_IMPL(name, op) \
+- template <typename T1, typename T2> \
+- inline std::string* name##Impl(const T1& v1, const T2& v2, \
+- const char* exprtext) { \
+- if (GOOGLE_PREDICT_TRUE(v1 op v2)) return NULL; \
+- else return MakeCheckOpString(v1, v2, exprtext); \
+- } \
+- inline std::string* name##Impl(int v1, int v2, const char* exprtext) { \
+- return name##Impl<int, int>(v1, v2, exprtext); \
+- }
+-
+-// We use the full name Check_EQ, Check_NE, etc. in case the file including
+-// base/logging.h provides its own #defines for the simpler names EQ, NE, etc.
+-// This happens if, for example, those are used as token names in a
+-// yacc grammar.
+-DEFINE_CHECK_OP_IMPL(Check_EQ, ==) // Compilation error with CHECK_EQ(NULL, x)?
+-DEFINE_CHECK_OP_IMPL(Check_NE, !=) // Use CHECK(x == NULL) instead.
+-DEFINE_CHECK_OP_IMPL(Check_LE, <=)
+-DEFINE_CHECK_OP_IMPL(Check_LT, < )
+-DEFINE_CHECK_OP_IMPL(Check_GE, >=)
+-DEFINE_CHECK_OP_IMPL(Check_GT, > )
+-#undef DEFINE_CHECK_OP_IMPL
+-
+-// Helper macro for binary operators.
+-// Don't use this macro directly in your code, use CHECK_EQ et al below.
+-
+-#if defined(STATIC_ANALYSIS)
+-// Only for static analysis tool to know that it is equivalent to assert
+-#define CHECK_OP_LOG(name, op, val1, val2, log) CHECK((val1) op (val2))
+-#elif DCHECK_IS_ON()
+-// In debug mode, avoid constructing CheckOpStrings if possible,
+-// to reduce the overhead of CHECK statements by 2x.
+-// Real DCHECK-heavy tests have seen 1.5x speedups.
+-
+-// The meaning of "string" might be different between now and
+-// when this macro gets invoked (e.g., if someone is experimenting
+-// with other string implementations that get defined after this
+-// file is included). Save the current meaning now and use it
+-// in the macro.
+-typedef std::string _Check_string;
+-#define CHECK_OP_LOG(name, op, val1, val2, log) \
+- while (google::_Check_string* _result = \
+- google::Check##name##Impl( \
+- google::GetReferenceableValue(val1), \
+- google::GetReferenceableValue(val2), \
+- #val1 " " #op " " #val2)) \
+- log(__FILE__, __LINE__, \
+- google::CheckOpString(_result)).stream()
+-#else
+-// In optimized mode, use CheckOpString to hint to compiler that
+-// the while condition is unlikely.
+-#define CHECK_OP_LOG(name, op, val1, val2, log) \
+- while (google::CheckOpString _result = \
+- google::Check##name##Impl( \
+- google::GetReferenceableValue(val1), \
+- google::GetReferenceableValue(val2), \
+- #val1 " " #op " " #val2)) \
+- log(__FILE__, __LINE__, _result).stream()
+-#endif // STATIC_ANALYSIS, DCHECK_IS_ON()
+-
+-#if GOOGLE_STRIP_LOG <= 3
+-#define CHECK_OP(name, op, val1, val2) \
+- CHECK_OP_LOG(name, op, val1, val2, google::LogMessageFatal)
+-#else
+-#define CHECK_OP(name, op, val1, val2) \
+- CHECK_OP_LOG(name, op, val1, val2, google::NullStreamFatal)
+-#endif // STRIP_LOG <= 3
+-
+-// Equality/Inequality checks - compare two values, and log a FATAL message
+-// including the two values when the result is not as expected. The values
+-// must have operator<<(ostream, ...) defined.
+-//
+-// You may append to the error message like so:
+-// CHECK_NE(1, 2) << ": The world must be ending!";
+-//
+-// We are very careful to ensure that each argument is evaluated exactly
+-// once, and that anything which is legal to pass as a function argument is
+-// legal here. In particular, the arguments may be temporary expressions
+-// which will end up being destroyed at the end of the apparent statement,
+-// for example:
+-// CHECK_EQ(string("abc")[1], 'b');
+-//
+-// WARNING: These don't compile correctly if one of the arguments is a pointer
+-// and the other is NULL. To work around this, simply static_cast NULL to the
+-// type of the desired pointer.
+-
+-#define CHECK_EQ(val1, val2) CHECK_OP(_EQ, ==, val1, val2)
+-#define CHECK_NE(val1, val2) CHECK_OP(_NE, !=, val1, val2)
+-#define CHECK_LE(val1, val2) CHECK_OP(_LE, <=, val1, val2)
+-#define CHECK_LT(val1, val2) CHECK_OP(_LT, < , val1, val2)
+-#define CHECK_GE(val1, val2) CHECK_OP(_GE, >=, val1, val2)
+-#define CHECK_GT(val1, val2) CHECK_OP(_GT, > , val1, val2)
+-
+-// Check that the input is non-NULL. This is very useful in constructor
+-// initializer lists.
+-
+-#define CHECK_NOTNULL(val) \
+- google::CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
+-
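+-// A minimal sketch of the initializer-list use; 'Connection' and 'Socket'
+-// are placeholder types:
+-//
+-//   class Connection {
+-//    public:
+-//     explicit Connection(Socket* socket) : socket_(CHECK_NOTNULL(socket)) {}
+-//    private:
+-//     Socket* socket_;
+-//   };
+-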
+-// Helper functions for string comparisons.
+-// To avoid bloat, the definitions are in logging.cc.
+-#define DECLARE_CHECK_STROP_IMPL(func, expected) \
+- GLOG_EXPORT std::string* Check##func##expected##Impl( \
+- const char* s1, const char* s2, const char* names);
+-DECLARE_CHECK_STROP_IMPL(strcmp, true)
+-DECLARE_CHECK_STROP_IMPL(strcmp, false)
+-DECLARE_CHECK_STROP_IMPL(strcasecmp, true)
+-DECLARE_CHECK_STROP_IMPL(strcasecmp, false)
+-#undef DECLARE_CHECK_STROP_IMPL
+-
+-// Helper macro for string comparisons.
+-// Don't use this macro directly in your code, use CHECK_STREQ et al below.
+-#define CHECK_STROP(func, op, expected, s1, s2) \
+- while (google::CheckOpString _result = \
+- google::Check##func##expected##Impl((s1), (s2), \
+- #s1 " " #op " " #s2)) \
+- LOG(FATAL) << *_result.str_
+-
+-
+-// String (char*) equality/inequality checks.
+-// CASE versions are case-insensitive.
+-//
+-// Note that "s1" and "s2" may be temporary strings which are destroyed
+-// by the compiler at the end of the current "full expression"
+-// (e.g. CHECK_STREQ(Foo().c_str(), Bar().c_str())).
+-
+-#define CHECK_STREQ(s1, s2) CHECK_STROP(strcmp, ==, true, s1, s2)
+-#define CHECK_STRNE(s1, s2) CHECK_STROP(strcmp, !=, false, s1, s2)
+-#define CHECK_STRCASEEQ(s1, s2) CHECK_STROP(strcasecmp, ==, true, s1, s2)
+-#define CHECK_STRCASENE(s1, s2) CHECK_STROP(strcasecmp, !=, false, s1, s2)
+-
+-#define CHECK_INDEX(I,A) CHECK(I < (sizeof(A)/sizeof(A[0])))
+-#define CHECK_BOUND(B,A) CHECK(B <= (sizeof(A)/sizeof(A[0])))
+-
+-#define CHECK_DOUBLE_EQ(val1, val2) \
+- do { \
+- CHECK_LE((val1), (val2)+0.000000000000001L); \
+- CHECK_GE((val1), (val2)-0.000000000000001L); \
+- } while (0)
+-
+-#define CHECK_NEAR(val1, val2, margin) \
+- do { \
+- CHECK_LE((val1), (val2)+(margin)); \
+- CHECK_GE((val1), (val2)-(margin)); \
+- } while (0)
+-
+-// perror()..googly style!
+-//
+-// PLOG() and PLOG_IF() and PCHECK() behave exactly like their LOG* and
+-// CHECK equivalents with the addition that they postpend a description
+-// of the current state of errno to their output lines.
+-
+-#define PLOG(severity) GOOGLE_PLOG(severity, 0).stream()
+-
+-#define GOOGLE_PLOG(severity, counter) \
+- google::ErrnoLogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, counter, \
+- &google::LogMessage::SendToLog)
+-
+-#define PLOG_IF(severity, condition) \
+- static_cast<void>(0), \
+- !(condition) ? (void) 0 : google::LogMessageVoidify() & PLOG(severity)
+-
+-// A CHECK() macro that postpends errno if the condition is false. E.g.
+-//
+-// if (poll(fds, nfds, timeout) == -1) { PCHECK(errno == EINTR); ... }
+-#define PCHECK(condition) \
+- PLOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN(!(condition))) \
+- << "Check failed: " #condition " "
+-
+-// A CHECK() macro that lets you assert the success of a function that
+-// returns -1 and sets errno in case of an error. E.g.
+-//
+-// CHECK_ERR(mkdir(path, 0700));
+-//
+-// or
+-//
+-// int fd = open(filename, flags); CHECK_ERR(fd) << ": open " << filename;
+-#define CHECK_ERR(invocation) \
+-PLOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN((invocation) == -1)) \
+- << #invocation
+-
+-// Use macro expansion to create, for each use of LOG_EVERY_N(), static
+-// variables with the __LINE__ expansion as part of the variable name.
+-#define LOG_EVERY_N_VARNAME(base, line) LOG_EVERY_N_VARNAME_CONCAT(base, line)
+-#define LOG_EVERY_N_VARNAME_CONCAT(base, line) base ## line
+-
+-#define LOG_OCCURRENCES LOG_EVERY_N_VARNAME(occurrences_, __LINE__)
+-#define LOG_OCCURRENCES_MOD_N LOG_EVERY_N_VARNAME(occurrences_mod_n_, __LINE__)
+-
+-#if 1 && __cplusplus >= 201103L
+-#define GLOG_CONSTEXPR constexpr
+-#else
+-#define GLOG_CONSTEXPR const
+-#endif
+-
+-#define LOG_TIME_PERIOD LOG_EVERY_N_VARNAME(timePeriod_, __LINE__)
+-#define LOG_PREVIOUS_TIME_RAW LOG_EVERY_N_VARNAME(previousTimeRaw_, __LINE__)
+-#define LOG_TIME_DELTA LOG_EVERY_N_VARNAME(deltaTime_, __LINE__)
+-#define LOG_CURRENT_TIME LOG_EVERY_N_VARNAME(currentTime_, __LINE__)
+-#define LOG_PREVIOUS_TIME LOG_EVERY_N_VARNAME(previousTime_, __LINE__)
+-
+-#if defined(__has_feature)
+-# if __has_feature(thread_sanitizer)
+-# define GLOG_SANITIZE_THREAD 1
+-# endif
+-#endif
+-
+-#if !defined(GLOG_SANITIZE_THREAD) && defined(__SANITIZE_THREAD__) && __SANITIZE_THREAD__
+-# define GLOG_SANITIZE_THREAD 1
+-#endif
+-
+-#if defined(GLOG_SANITIZE_THREAD)
+-#define GLOG_IFDEF_THREAD_SANITIZER(X) X
+-#else
+-#define GLOG_IFDEF_THREAD_SANITIZER(X)
+-#endif
+-
+-#if defined(GLOG_SANITIZE_THREAD)
+-} // namespace google
+-
+-// We need to identify the static variables as "benign" races
+-// to avoid noisy reports from TSAN.
+-extern "C" void AnnotateBenignRaceSized(
+- const char *file,
+- int line,
+- const volatile void *mem,
+- size_t size,
+- const char *description);
+-
+-namespace google {
+-#endif
+-
+-#if __cplusplus >= 201103L && 1 && 1 // Have <chrono> and <atomic>
+-#define SOME_KIND_OF_LOG_EVERY_T(severity, seconds) \
+- GLOG_CONSTEXPR std::chrono::nanoseconds LOG_TIME_PERIOD = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::duration<double>(seconds)); \
+- static std::atomic<google::int64> LOG_PREVIOUS_TIME_RAW; \
+- GLOG_IFDEF_THREAD_SANITIZER( \
+- AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_TIME_PERIOD, sizeof(google::int64), "")); \
+- GLOG_IFDEF_THREAD_SANITIZER( \
+- AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_PREVIOUS_TIME_RAW, sizeof(google::int64), "")); \
+- const auto LOG_CURRENT_TIME = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now().time_since_epoch()); \
+- const auto LOG_PREVIOUS_TIME = LOG_PREVIOUS_TIME_RAW.load(std::memory_order_relaxed); \
+- const auto LOG_TIME_DELTA = LOG_CURRENT_TIME - std::chrono::nanoseconds(LOG_PREVIOUS_TIME); \
+- if (LOG_TIME_DELTA > LOG_TIME_PERIOD) \
+- LOG_PREVIOUS_TIME_RAW.store(std::chrono::duration_cast<std::chrono::nanoseconds>(LOG_CURRENT_TIME).count(), std::memory_order_relaxed); \
+- if (LOG_TIME_DELTA > LOG_TIME_PERIOD) google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity).stream()
+-#elif defined(GLOG_OS_WINDOWS)
+-#define SOME_KIND_OF_LOG_EVERY_T(severity, seconds) \
+- GLOG_CONSTEXPR LONGLONG LOG_TIME_PERIOD = (seconds) * LONGLONG(1000000000); \
+- static LARGE_INTEGER LOG_PREVIOUS_TIME; \
+- LONGLONG LOG_TIME_DELTA; \
+- { \
+- LARGE_INTEGER currTime; \
+- LARGE_INTEGER freq; \
+- QueryPerformanceCounter(&currTime); \
+- QueryPerformanceFrequency(&freq); \
+- InterlockedCompareExchange64(&LOG_PREVIOUS_TIME.QuadPart, currTime.QuadPart, 0); \
+- LOG_TIME_DELTA = (currTime.QuadPart - LOG_PREVIOUS_TIME.QuadPart) * LONGLONG(1000000000) / freq.QuadPart; \
+- if (LOG_TIME_DELTA > LOG_TIME_PERIOD) InterlockedExchange64(&LOG_PREVIOUS_TIME.QuadPart, currTime.QuadPart); \
+- } \
+- if (LOG_TIME_DELTA > LOG_TIME_PERIOD) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity).stream()
+-#else
+-#define SOME_KIND_OF_LOG_EVERY_T(severity, seconds) \
+- GLOG_CONSTEXPR google::int64 LOG_TIME_PERIOD(seconds * 1000000000); \
+- static google::int64 LOG_PREVIOUS_TIME; \
+- google::int64 LOG_TIME_DELTA = 0; \
+- { \
+- timespec currentTime = {}; \
+- clock_gettime(CLOCK_MONOTONIC, &currentTime); \
+- LOG_TIME_DELTA = (currentTime.tv_sec * 1000000000 + currentTime.tv_nsec) - LOG_PREVIOUS_TIME; \
+- } \
+- if (LOG_TIME_DELTA > LOG_TIME_PERIOD) __sync_add_and_fetch(&LOG_PREVIOUS_TIME, LOG_TIME_DELTA); \
+- if (LOG_TIME_DELTA > LOG_TIME_PERIOD) google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity).stream()
+-#endif
+-
+-#if 1 && __cplusplus >= 201103L
+-#define SOME_KIND_OF_LOG_EVERY_N(severity, n, what_to_do) \
+- static std::atomic<int> LOG_OCCURRENCES(0), LOG_OCCURRENCES_MOD_N(0); \
+- GLOG_IFDEF_THREAD_SANITIZER(AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_OCCURRENCES, sizeof(int), "")); \
+- GLOG_IFDEF_THREAD_SANITIZER(AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_OCCURRENCES_MOD_N, sizeof(int), "")); \
+- ++LOG_OCCURRENCES; \
+- if (++LOG_OCCURRENCES_MOD_N > n) LOG_OCCURRENCES_MOD_N -= n; \
+- if (LOG_OCCURRENCES_MOD_N == 1) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_LOG_IF_EVERY_N(severity, condition, n, what_to_do) \
+- static std::atomic<int> LOG_OCCURRENCES(0), LOG_OCCURRENCES_MOD_N(0); \
+- GLOG_IFDEF_THREAD_SANITIZER(AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_OCCURRENCES, sizeof(int), "")); \
+- GLOG_IFDEF_THREAD_SANITIZER(AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_OCCURRENCES_MOD_N, sizeof(int), "")); \
+- ++LOG_OCCURRENCES; \
+- if ((condition) && \
+- ((LOG_OCCURRENCES_MOD_N=(LOG_OCCURRENCES_MOD_N + 1) % n) == (1 % n))) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_PLOG_EVERY_N(severity, n, what_to_do) \
+- static std::atomic<int> LOG_OCCURRENCES(0), LOG_OCCURRENCES_MOD_N(0); \
+- GLOG_IFDEF_THREAD_SANITIZER(AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_OCCURRENCES, sizeof(int), "")); \
+- GLOG_IFDEF_THREAD_SANITIZER(AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_OCCURRENCES_MOD_N, sizeof(int), "")); \
+- ++LOG_OCCURRENCES; \
+- if (++LOG_OCCURRENCES_MOD_N > n) LOG_OCCURRENCES_MOD_N -= n; \
+- if (LOG_OCCURRENCES_MOD_N == 1) \
+- google::ErrnoLogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_LOG_FIRST_N(severity, n, what_to_do) \
+- static std::atomic<int> LOG_OCCURRENCES(0); \
+- GLOG_IFDEF_THREAD_SANITIZER(AnnotateBenignRaceSized(__FILE__, __LINE__, &LOG_OCCURRENCES, sizeof(int), "")); \
+- if (LOG_OCCURRENCES <= n) \
+- ++LOG_OCCURRENCES; \
+- if (LOG_OCCURRENCES <= n) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#elif defined(GLOG_OS_WINDOWS)
+-
+-#define SOME_KIND_OF_LOG_EVERY_N(severity, n, what_to_do) \
+- static volatile unsigned LOG_OCCURRENCES = 0; \
+- static volatile unsigned LOG_OCCURRENCES_MOD_N = 0; \
+- InterlockedIncrement(&LOG_OCCURRENCES); \
+- if (InterlockedIncrement(&LOG_OCCURRENCES_MOD_N) > n) \
+- InterlockedExchangeSubtract(&LOG_OCCURRENCES_MOD_N, n); \
+- if (LOG_OCCURRENCES_MOD_N == 1) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_LOG_IF_EVERY_N(severity, condition, n, what_to_do) \
+- static volatile unsigned LOG_OCCURRENCES = 0; \
+- static volatile unsigned LOG_OCCURRENCES_MOD_N = 0; \
+- InterlockedIncrement(&LOG_OCCURRENCES); \
+- if ((condition) && \
+- ((InterlockedIncrement(&LOG_OCCURRENCES_MOD_N), \
+- (LOG_OCCURRENCES_MOD_N > n && InterlockedExchangeSubtract(&LOG_OCCURRENCES_MOD_N, n))), \
+- LOG_OCCURRENCES_MOD_N == 1)) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_PLOG_EVERY_N(severity, n, what_to_do) \
+- static volatile unsigned LOG_OCCURRENCES = 0; \
+- static volatile unsigned LOG_OCCURRENCES_MOD_N = 0; \
+- InterlockedIncrement(&LOG_OCCURRENCES); \
+- if (InterlockedIncrement(&LOG_OCCURRENCES_MOD_N) > n) \
+- InterlockedExchangeSubtract(&LOG_OCCURRENCES_MOD_N, n); \
+- if (LOG_OCCURRENCES_MOD_N == 1) \
+- google::ErrnoLogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_LOG_FIRST_N(severity, n, what_to_do) \
+- static volatile unsigned LOG_OCCURRENCES = 0; \
+- if (LOG_OCCURRENCES <= n) \
+- InterlockedIncrement(&LOG_OCCURRENCES); \
+- if (LOG_OCCURRENCES <= n) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#else
+-
+-#define SOME_KIND_OF_LOG_EVERY_N(severity, n, what_to_do) \
+- static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+- __sync_add_and_fetch(&LOG_OCCURRENCES, 1); \
+- if (__sync_add_and_fetch(&LOG_OCCURRENCES_MOD_N, 1) > n) \
+- __sync_sub_and_fetch(&LOG_OCCURRENCES_MOD_N, n); \
+- if (LOG_OCCURRENCES_MOD_N == 1) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_LOG_IF_EVERY_N(severity, condition, n, what_to_do) \
+- static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+- __sync_add_and_fetch(&LOG_OCCURRENCES, 1); \
+- if ((condition) && \
+- (__sync_add_and_fetch(&LOG_OCCURRENCES_MOD_N, 1) || true) && \
+- ((LOG_OCCURRENCES_MOD_N >= n && __sync_sub_and_fetch(&LOG_OCCURRENCES_MOD_N, n)) || true) && \
+- LOG_OCCURRENCES_MOD_N == (1 % n)) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_PLOG_EVERY_N(severity, n, what_to_do) \
+- static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+- __sync_add_and_fetch(&LOG_OCCURRENCES, 1); \
+- if (__sync_add_and_fetch(&LOG_OCCURRENCES_MOD_N, 1) > n) \
+- __sync_sub_and_fetch(&LOG_OCCURRENCES_MOD_N, n); \
+- if (LOG_OCCURRENCES_MOD_N == 1) \
+- google::ErrnoLogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-
+-#define SOME_KIND_OF_LOG_FIRST_N(severity, n, what_to_do) \
+- static int LOG_OCCURRENCES = 0; \
+- if (LOG_OCCURRENCES <= n) \
+- __sync_add_and_fetch(&LOG_OCCURRENCES, 1); \
+- if (LOG_OCCURRENCES <= n) \
+- google::LogMessage( \
+- __FILE__, __LINE__, google::GLOG_ ## severity, LOG_OCCURRENCES, \
+- &what_to_do).stream()
+-#endif
+-
+-namespace glog_internal_namespace_ {
+-template <bool>
+-struct CompileAssert {
+-};
+-struct CrashReason;
+-
+-// Returns true if FailureSignalHandler is installed.
+-// Needs to be exported since it's used by the signalhandler_unittest.
+-GLOG_EXPORT bool IsFailureSignalHandlerInstalled();
+-} // namespace glog_internal_namespace_
+-
+-#define LOG_EVERY_N(severity, n) \
+- SOME_KIND_OF_LOG_EVERY_N(severity, (n), google::LogMessage::SendToLog)
+-
+-#define LOG_EVERY_T(severity, T) SOME_KIND_OF_LOG_EVERY_T(severity, (T))
+-
+-#define SYSLOG_EVERY_N(severity, n) \
+- SOME_KIND_OF_LOG_EVERY_N(severity, (n), google::LogMessage::SendToSyslogAndLog)
+-
+-#define PLOG_EVERY_N(severity, n) \
+- SOME_KIND_OF_PLOG_EVERY_N(severity, (n), google::LogMessage::SendToLog)
+-
+-#define LOG_FIRST_N(severity, n) \
+- SOME_KIND_OF_LOG_FIRST_N(severity, (n), google::LogMessage::SendToLog)
+-
+-#define LOG_IF_EVERY_N(severity, condition, n) \
+- SOME_KIND_OF_LOG_IF_EVERY_N(severity, (condition), (n), google::LogMessage::SendToLog)
+-
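+-// A small sketch of the rate-limited forms (the messages and rates are
+-// arbitrary):
+-//
+-//   LOG_EVERY_T(INFO, 0.5) << "heartbeat";        // at most ~every 0.5 sec
+-//   LOG_FIRST_N(WARNING, 5) << "Slow response #" << google::COUNTER;
+-//   PLOG_EVERY_N(ERROR, 100) << "write() failed"; // appends errno text
+-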
+-// We want the special COUNTER value available for LOG_EVERY_X()'ed messages
+-enum PRIVATE_Counter {COUNTER};
+-
+-#ifdef GLOG_NO_ABBREVIATED_SEVERITIES
+-// wingdi.h defines ERROR to be 0. When we call LOG(ERROR), it gets
+-// substituted with 0, and it expands to COMPACT_GOOGLE_LOG_0. To allow us
+-// to keep using this syntax, we define this macro to do the same thing
+-// as COMPACT_GOOGLE_LOG_ERROR.
+-#define COMPACT_GOOGLE_LOG_0 COMPACT_GOOGLE_LOG_ERROR
+-#define SYSLOG_0 SYSLOG_ERROR
+-#define LOG_TO_STRING_0 LOG_TO_STRING_ERROR
+-// Needed for LOG_IS_ON(ERROR).
+-const LogSeverity GLOG_0 = GLOG_ERROR;
+-#else
+-// Users may include windows.h after logging.h without defining
+-// GLOG_NO_ABBREVIATED_SEVERITIES or WIN32_LEAN_AND_MEAN.
+-// In that case we cannot detect whether ERROR is defined before users
+-// actually use ERROR. Emit an undefined symbol to warn them.
+-# define GLOG_ERROR_MSG ERROR_macro_is_defined_Define_GLOG_NO_ABBREVIATED_SEVERITIES_before_including_logging_h_See_the_document_for_detail
+-# define COMPACT_GOOGLE_LOG_0 GLOG_ERROR_MSG
+-# define SYSLOG_0 GLOG_ERROR_MSG
+-# define LOG_TO_STRING_0 GLOG_ERROR_MSG
+-# define GLOG_0 GLOG_ERROR_MSG
+-#endif
+-
+-// Plus some debug-logging macros that get compiled to nothing for production
+-
+-#if DCHECK_IS_ON()
+-
+-#define DLOG(severity) LOG(severity)
+-#define DVLOG(verboselevel) VLOG(verboselevel)
+-#define DLOG_IF(severity, condition) LOG_IF(severity, condition)
+-#define DLOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n)
+-#define DLOG_IF_EVERY_N(severity, condition, n) \
+- LOG_IF_EVERY_N(severity, condition, n)
+-#define DLOG_ASSERT(condition) LOG_ASSERT(condition)
+-
+-// debug-only checking. executed if DCHECK_IS_ON().
+-#define DCHECK(condition) CHECK(condition)
+-#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
+-#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
+-#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
+-#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
+-#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
+-#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
+-#define DCHECK_NOTNULL(val) CHECK_NOTNULL(val)
+-#define DCHECK_STREQ(str1, str2) CHECK_STREQ(str1, str2)
+-#define DCHECK_STRCASEEQ(str1, str2) CHECK_STRCASEEQ(str1, str2)
+-#define DCHECK_STRNE(str1, str2) CHECK_STRNE(str1, str2)
+-#define DCHECK_STRCASENE(str1, str2) CHECK_STRCASENE(str1, str2)
+-
+-#else // !DCHECK_IS_ON()
+-
+-#define DLOG(severity) \
+- static_cast<void>(0), \
+- true ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+-
+-#define DVLOG(verboselevel) \
+- static_cast<void>(0), \
+- (true || !VLOG_IS_ON(verboselevel)) ? \
+- (void) 0 : google::LogMessageVoidify() & LOG(INFO)
+-
+-#define DLOG_IF(severity, condition) \
+- static_cast<void>(0), \
+- (true || !(condition)) ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+-
+-#define DLOG_EVERY_N(severity, n) \
+- static_cast<void>(0), \
+- true ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+-
+-#define DLOG_IF_EVERY_N(severity, condition, n) \
+- static_cast<void>(0), \
+- (true || !(condition))? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+-
+-#define DLOG_ASSERT(condition) \
+- static_cast<void>(0), \
+- true ? (void) 0 : LOG_ASSERT(condition)
+-
+-// MSVC warning C4127: conditional expression is constant
+-#define DCHECK(condition) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK(condition)
+-
+-#define DCHECK_EQ(val1, val2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_EQ(val1, val2)
+-
+-#define DCHECK_NE(val1, val2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_NE(val1, val2)
+-
+-#define DCHECK_LE(val1, val2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_LE(val1, val2)
+-
+-#define DCHECK_LT(val1, val2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_LT(val1, val2)
+-
+-#define DCHECK_GE(val1, val2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_GE(val1, val2)
+-
+-#define DCHECK_GT(val1, val2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_GT(val1, val2)
+-
+-// You may see warnings in release mode if you don't use the return
+-// value of DCHECK_NOTNULL. Please just use DCHECK for such cases.
+-#define DCHECK_NOTNULL(val) (val)
+-
+-#define DCHECK_STREQ(str1, str2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_STREQ(str1, str2)
+-
+-#define DCHECK_STRCASEEQ(str1, str2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_STRCASEEQ(str1, str2)
+-
+-#define DCHECK_STRNE(str1, str2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_STRNE(str1, str2)
+-
+-#define DCHECK_STRCASENE(str1, str2) \
+- GLOG_MSVC_PUSH_DISABLE_WARNING(4127) \
+- while (false) \
+- GLOG_MSVC_POP_WARNING() CHECK_STRCASENE(str1, str2)
+-
+-#endif // DCHECK_IS_ON()
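+-
+-// A small sketch ('ptr', 'index' and 'limit' are placeholders). When
+-// DCHECK_IS_ON() is 0 the checked expressions are not evaluated, so they
+-// must not carry side effects the program relies on:
+-//
+-//   DCHECK(ptr != NULL) << "ptr must be set before Run()";
+-//   DCHECK_LT(index, limit);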
+-
+-// Log only in verbose mode.
+-
+-#define VLOG(verboselevel) LOG_IF(INFO, VLOG_IS_ON(verboselevel))
+-
+-#define VLOG_IF(verboselevel, condition) \
+- LOG_IF(INFO, (condition) && VLOG_IS_ON(verboselevel))
+-
+-#define VLOG_EVERY_N(verboselevel, n) \
+- LOG_IF_EVERY_N(INFO, VLOG_IS_ON(verboselevel), n)
+-
+-#define VLOG_IF_EVERY_N(verboselevel, condition, n) \
+- LOG_IF_EVERY_N(INFO, (condition) && VLOG_IS_ON(verboselevel), n)
+-
+-namespace base_logging {
+-
+-// LogMessage::LogStream is a std::ostream backed by this streambuf.
+-// This class ignores overflow and leaves two bytes at the end of the
+-// buffer to allow for a '\n' and '\0'.
+-class GLOG_EXPORT LogStreamBuf : public std::streambuf {
+- public:
+- // REQUIREMENTS: "len" must be >= 2 to account for the '\n' and '\0'.
+- LogStreamBuf(char *buf, int len) {
+- setp(buf, buf + len - 2);
+- }
+-
+- // This effectively ignores overflow.
+- int_type overflow(int_type ch) {
+- return ch;
+- }
+-
+- // Legacy public ostrstream method.
+- size_t pcount() const { return static_cast<size_t>(pptr() - pbase()); }
+- char* pbase() const { return std::streambuf::pbase(); }
+-};
+-
+-} // namespace base_logging
+-
+-//
+-// This class more or less represents a particular log message. You
+-// create an instance of LogMessage and then stream stuff to it.
+-// When you finish streaming to it, ~LogMessage is called and the
+-// full message gets streamed to the appropriate destination.
+-//
+-// You shouldn't actually use LogMessage's constructor to log things,
+-// though. You should use the LOG() macro (and variants thereof)
+-// above.
+-class GLOG_EXPORT LogMessage {
+-public:
+- enum {
+- // Passing kNoLogPrefix for the line number disables the
+- // log-message prefix. Useful for using the LogMessage
+- // infrastructure as a printing utility. See also the --log_prefix
+- // flag for controlling the log-message prefix on an
+- // application-wide basis.
+- kNoLogPrefix = -1
+- };
+-
+- // LogStream inherits from a non-DLL-exported class (std::ostrstream),
+- // and VC++ produces a warning for this situation.
+- // However, MSDN says "C4275 can be ignored in Microsoft Visual C++
+- // 2005 if you are deriving from a type in the Standard C++ Library"
+- // http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+- // Let's just ignore the warning.
+-GLOG_MSVC_PUSH_DISABLE_WARNING(4275)
+- class GLOG_EXPORT LogStream : public std::ostream {
+-GLOG_MSVC_POP_WARNING()
+- public:
+- LogStream(char *buf, int len, int64 ctr)
+- : std::ostream(NULL),
+- streambuf_(buf, len),
+- ctr_(ctr),
+- self_(this) {
+- rdbuf(&streambuf_);
+- }
+-
+- int64 ctr() const { return ctr_; }
+- void set_ctr(int64 ctr) { ctr_ = ctr; }
+- LogStream* self() const { return self_; }
+-
+- // Legacy std::streambuf methods.
+- size_t pcount() const { return streambuf_.pcount(); }
+- char* pbase() const { return streambuf_.pbase(); }
+- char* str() const { return pbase(); }
+-
+- private:
+- LogStream(const LogStream&);
+- LogStream& operator=(const LogStream&);
+- base_logging::LogStreamBuf streambuf_;
+- int64 ctr_; // Counter hack (for the LOG_EVERY_X() macro)
+- LogStream *self_; // Consistency check hack
+- };
+-
+-public:
+- // icc 8 requires this typedef to avoid an internal compiler error.
+- typedef void (LogMessage::*SendMethod)();
+-
+- LogMessage(const char* file, int line, LogSeverity severity, int64 ctr,
+- SendMethod send_method);
+-
+- // Two special constructors that generate reduced amounts of code at
+- // LOG call sites for common cases.
+-
+- // Used for LOG(INFO): Implied are:
+- // severity = INFO, ctr = 0, send_method = &LogMessage::SendToLog.
+- //
+- // Using this constructor instead of the more complex constructor above
+- // saves 19 bytes per call site.
+- LogMessage(const char* file, int line);
+-
+- // Used for LOG(severity) where severity != INFO. Implied
+- // are: ctr = 0, send_method = &LogMessage::SendToLog
+- //
+- // Using this constructor instead of the more complex constructor above
+- // saves 17 bytes per call site.
+- LogMessage(const char* file, int line, LogSeverity severity);
+-
+- // Constructor to log this message to a specified sink (if not NULL).
+- // Implied are: ctr = 0, send_method = &LogMessage::SendToSinkAndLog if
+- // also_send_to_log is true, send_method = &LogMessage::SendToSink otherwise.
+- LogMessage(const char* file, int line, LogSeverity severity, LogSink* sink,
+- bool also_send_to_log);
+-
+- // Constructor where we also give a vector<string> pointer
+- // for storing the messages (if the pointer is not NULL).
+- // Implied are: ctr = 0, send_method = &LogMessage::SaveOrSendToLog.
+- LogMessage(const char* file, int line, LogSeverity severity,
+- std::vector<std::string>* outvec);
+-
+- // Constructor where we also give a string pointer for storing the
+- // message (if the pointer is not NULL). Implied are: ctr = 0,
+- // send_method = &LogMessage::WriteToStringAndLog.
+- LogMessage(const char* file, int line, LogSeverity severity,
+- std::string* message);
+-
+- // A special constructor used for check failures
+- LogMessage(const char* file, int line, const CheckOpString& result);
+-
+- ~LogMessage();
+-
+- // Flush a buffered message to the sink set in the constructor. Always
+- // called by the destructor, it may also be called from elsewhere if
+- // needed. Only the first call is actioned; any later ones are ignored.
+- void Flush();
+-
+- // An arbitrary limit on the length of a single log message. This
+- // is so that streaming can be done more efficiently.
+- static const size_t kMaxLogMessageLen;
+-
+- // These should not be called directly outside of logging.*,
+- // only passed as SendMethod arguments to other LogMessage methods:
+- void SendToLog(); // Actually dispatch to the logs
+- void SendToSyslogAndLog(); // Actually dispatch to syslog and the logs
+-
+- // Call abort() or similar to perform LOG(FATAL) crash.
+- static void __attribute__((noreturn)) Fail();
+-
+- std::ostream& stream();
+-
+- int preserved_errno() const;
+-
+- // Must be called without the log_mutex held. (L < log_mutex)
+- static int64 num_messages(int severity);
+-
+- const LogMessageTime& getLogMessageTime() const;
+-
+- struct LogMessageData;
+-
+-private:
+- // Fully internal SendMethod cases:
+- void SendToSinkAndLog(); // Send to sink if provided and dispatch to the logs
+- void SendToSink(); // Send to sink if provided, do nothing otherwise.
+-
+- // Write to string if provided and dispatch to the logs.
+- void WriteToStringAndLog();
+-
+- void SaveOrSendToLog(); // Save to stringvec if provided, else to logs
+-
+- void Init(const char* file, int line, LogSeverity severity,
+- void (LogMessage::*send_method)());
+-
+- // Used to fill in crash information during LOG(FATAL) failures.
+- void RecordCrashReason(glog_internal_namespace_::CrashReason* reason);
+-
+- // Counts of messages sent at each priority:
+- static int64 num_messages_[NUM_SEVERITIES]; // under log_mutex
+-
+- // We keep the data in a separate struct so that each instance of
+- // LogMessage uses less stack space.
+- LogMessageData* allocated_;
+- LogMessageData* data_;
+- LogMessageTime logmsgtime_;
+-
+- friend class LogDestination;
+-
+- LogMessage(const LogMessage&);
+- void operator=(const LogMessage&);
+-};
+-
+-// This class happens to be thread-hostile because all instances share
+-// a single data buffer, but since it can only be created just before
+-// the process dies, we don't worry so much.
+-class GLOG_EXPORT LogMessageFatal : public LogMessage {
+- public:
+- LogMessageFatal(const char* file, int line);
+- LogMessageFatal(const char* file, int line, const CheckOpString& result);
+- __attribute__((noreturn)) ~LogMessageFatal();
+-};
+-
+-// A non-macro interface to the log facility; (useful
+-// when the logging level is not a compile-time constant).
+-inline void LogAtLevel(int const severity, std::string const &msg) {
+- LogMessage(__FILE__, __LINE__, severity).stream() << msg;
+-}
+-
+-// A macro alternative to LogAtLevel. New code may want to use this
+-// version, since it has two advantages: (1) it outputs the file name and
+-// the line number where the macro appears, like the other LOG macros, and
+-// (2) it can be used as a C++ stream.
+-#define LOG_AT_LEVEL(severity) google::LogMessage(__FILE__, __LINE__, severity).stream()
+-
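+-// For example, when the severity is only known at run time ('failed' is a
+-// placeholder condition):
+-//
+-//   const google::LogSeverity level =
+-//       failed ? google::GLOG_ERROR : google::GLOG_INFO;
+-//   LOG_AT_LEVEL(level) << "Upload finished";
+-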
+-// Check if it's compiled in C++11 mode.
+-//
+-// GXX_EXPERIMENTAL_CXX0X is defined by gcc and clang up to at least
+-// gcc-4.7 and clang-3.1 (2011-12-13). __cplusplus was defined to 1
+-// in gcc before 4.7 (Crosstool 16) and clang before 3.1, but is
+-// defined according to the language version in effect thereafter.
+-// Microsoft Visual Studio 14 (2015) sets __cplusplus==199711 despite
+-// reasonably good C++11 support, so we set LANG_CXX for it and
+-// newer versions (_MSC_VER >= 1900).
+-#if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \
+- (defined(_MSC_VER) && _MSC_VER >= 1900)) && !defined(__UCLIBCXX_MAJOR__)
+-// Helper for CHECK_NOTNULL().
+-//
+-// In C++11, all cases can be handled by a single function. Since the value
+-// category of the argument is preserved (also for rvalue references),
+-// member initializer lists like the one below will compile correctly:
+-//
+-// Foo()
+-// : x_(CHECK_NOTNULL(MethodReturningUniquePtr())) {}
+-template <typename T>
+-T CheckNotNull(const char* file, int line, const char* names, T&& t) {
+- if (t == nullptr) {
+- LogMessageFatal(file, line, new std::string(names));
+- }
+- return std::forward<T>(t);
+-}
+-
+-#else
+-
+-// A small helper for CHECK_NOTNULL().
+-template <typename T>
+-T* CheckNotNull(const char *file, int line, const char *names, T* t) {
+- if (t == NULL) {
+- LogMessageFatal(file, line, new std::string(names));
+- }
+- return t;
+-}
+-#endif
+-
+-// Allow folks to put a counter in the LOG_EVERY_X()'ed messages. This
+-// only works if ostream is a LogStream. If the ostream is not a
+-// LogStream you'll get an assert saying as much at runtime.
+-GLOG_EXPORT std::ostream& operator<<(std::ostream &os,
+- const PRIVATE_Counter&);
+-
+-
+-// Derived class for PLOG*() above.
+-class GLOG_EXPORT ErrnoLogMessage : public LogMessage {
+- public:
+- ErrnoLogMessage(const char* file, int line, LogSeverity severity, int64 ctr,
+- void (LogMessage::*send_method)());
+-
+- // Postpends ": strerror(errno) [errno]".
+- ~ErrnoLogMessage();
+-
+- private:
+- ErrnoLogMessage(const ErrnoLogMessage&);
+- void operator=(const ErrnoLogMessage&);
+-};
+-
+-
+-// This class is used to explicitly ignore values in the conditional
+-// logging macros. This avoids compiler warnings like "value computed
+-// is not used" and "statement has no effect".
+-
+-class GLOG_EXPORT LogMessageVoidify {
+- public:
+- LogMessageVoidify() { }
+- // This has to be an operator with a precedence lower than << but
+- // higher than ?:
+- void operator&(std::ostream&) { }
+-};
+-
+-
+-// Flushes all log files that contain messages that are at least of
+-// the specified severity level. Thread-safe.
+-GLOG_EXPORT void FlushLogFiles(LogSeverity min_severity);
+-
+-// Flushes all log files that contain messages that are at least of
+-// the specified severity level. Thread-hostile because it ignores
+-// locking -- used for catastrophic failures.
+-GLOG_EXPORT void FlushLogFilesUnsafe(LogSeverity min_severity);
+-
+-//
+-// Set the destination to which a particular severity level of log
+-// messages is sent. If base_filename is "", it means "don't log this
+-// severity". Thread-safe.
+-//
+-GLOG_EXPORT void SetLogDestination(LogSeverity severity,
+- const char* base_filename);
+-
+-//
+-// Set the basename of the symlink to the latest log file at a given
+-// severity. If symlink_basename is empty, do not make a symlink. If
+-// you don't call this function, the symlink basename is the
+-// invocation name of the program. Thread-safe.
+-//
+-GLOG_EXPORT void SetLogSymlink(LogSeverity severity,
+- const char* symlink_basename);
+-
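+-// A configuration sketch; the "/var/log/myapp/" prefixes and the "myapp"
+-// symlink basename are placeholders:
+-//
+-//   google::SetLogDestination(google::GLOG_INFO, "/var/log/myapp/info_");
+-//   google::SetLogDestination(google::GLOG_WARNING, "");  // no WARNING files
+-//   google::SetLogSymlink(google::GLOG_INFO, "myapp");
+-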
+-//
+-// Used to send logs to some other kind of destination
+-// Users should subclass LogSink and override send to do whatever they want.
+-// Implementations must be thread-safe because a shared instance will
+-// be called from whichever thread ran the LOG(XXX) line.
+-class GLOG_EXPORT LogSink {
+- public:
+- virtual ~LogSink();
+-
+- // Sink's logging logic (message_len is such as to exclude '\n' at the end).
+- // This method can't use LOG() or CHECK() as logging system mutex(s) are held
+- // during this call.
+- virtual void send(LogSeverity severity, const char* full_filename,
+- const char* base_filename, int line,
+- const LogMessageTime& logmsgtime, const char* message,
+- size_t message_len);
+- // Provide an overload for compatibility purposes
+- GLOG_DEPRECATED
+- virtual void send(LogSeverity severity, const char* full_filename,
+- const char* base_filename, int line, const std::tm* t,
+- const char* message, size_t message_len);
+-
+- // Redefine this to implement waiting for
+- // the sink's logging logic to complete.
+- // It will be called after each send() returns,
+- // but before that LogMessage exits or crashes.
+- // By default this function does nothing.
+- // Using this function one can implement complex logic for send()
+- // that itself involves logging; and do all this w/o causing deadlocks and
+- // inconsistent rearrangement of log messages.
+- // E.g. if a LogSink has thread-specific actions, the send() method
+- // can simply add the message to a queue and wake up another thread that
+- // handles real logging while itself making some LOG() calls;
+- // WaitTillSent() can be implemented to wait for that logic to complete.
+- // See our unittest for an example.
+- virtual void WaitTillSent();
+-
+- // Returns the normal text output of the log message.
+- // Can be useful to implement send().
+- static std::string ToString(LogSeverity severity, const char* file, int line,
+- const LogMessageTime &logmsgtime,
+- const char* message, size_t message_len);
+-};
+-
+-// Add or remove a LogSink as a consumer of logging data. Thread-safe.
+-GLOG_EXPORT void AddLogSink(LogSink *destination);
+-GLOG_EXPORT void RemoveLogSink(LogSink *destination);
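A usage illustration of the LogSink interface removed above (a minimal sketch, assuming the stock <glog/logging.h> header and that the exported symbols live in namespace google; StderrMirrorSink is a made-up name):

    #include <cstdio>
    #include <glog/logging.h>

    // Illustrative only: mirrors every message to stderr in the normal glog format.
    class StderrMirrorSink : public google::LogSink {
     public:
      void send(google::LogSeverity severity, const char* /*full_filename*/,
                const char* base_filename, int line,
                const google::LogMessageTime& time, const char* message,
                size_t message_len) override {
        // Per the comment above, LOG()/CHECK() must not be used from send().
        std::fprintf(stderr, "%s\n",
                     ToString(severity, base_filename, line, time, message,
                              message_len).c_str());
      }
    };

    // Typical registration:
    //   StderrMirrorSink sink;
    //   google::AddLogSink(&sink);
    //   ...
    //   google::RemoveLogSink(&sink);  // before the sink goes out of scope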
+-
+-//
+-// Specify an "extension" added to the filename specified via
+-// SetLogDestination. This applies to all severity levels. It's
+-// often used to append the port we're listening on to the logfile
+-// name. Thread-safe.
+-//
+-GLOG_EXPORT void SetLogFilenameExtension(
+- const char* filename_extension);
+-
+-//
+-// Make it so that all log messages of at least a particular severity
+-// are logged to stderr (in addition to logging to the usual log
+-// file(s)). Thread-safe.
+-//
+-GLOG_EXPORT void SetStderrLogging(LogSeverity min_severity);
+-
+-//
+-// Make it so that all log messages go only to stderr. Thread-safe.
+-//
+-GLOG_EXPORT void LogToStderr();
+-
+-//
+-// Make it so that all log messages of at least a particular severity are
+-// logged via email to a list of addresses (in addition to logging to the
+-// usual log file(s)). The list of addresses is just a string containing
+-// the email addresses to send to (separated by spaces, say). Thread-safe.
+-//
+-GLOG_EXPORT void SetEmailLogging(LogSeverity min_severity,
+- const char* addresses);
+-
+-// A simple function that sends email. dest is a comma-separated
+-// list of addresses. Thread-safe.
+-GLOG_EXPORT bool SendEmail(const char* dest, const char* subject,
+- const char* body);
+-
+-GLOG_EXPORT const std::vector<std::string>& GetLoggingDirectories();
+-
+-// For tests only: Clear the internal [cached] list of logging directories to
+-// force a refresh the next time GetLoggingDirectories is called.
+-// Thread-hostile.
+-void TestOnly_ClearLoggingDirectoriesList();
+-
+-// Returns a set of existing temporary directories, which will be a
+-// subset of the directories returned by GetLoggingDirectories().
+-// Thread-safe.
+-GLOG_EXPORT void GetExistingTempDirectories(
+- std::vector<std::string>* list);
+-
+-// Print any fatal message again -- useful to call from signal handler
+-// so that the last thing in the output is the fatal message.
+-// Thread-hostile, but a race is unlikely.
+-GLOG_EXPORT void ReprintFatalMessage();
+-
+-// Truncate a log file that may be the append-only output of multiple
+-// processes and hence can't simply be renamed/reopened (typically a
+-// stdout/stderr). If the file "path" is > "limit" bytes, copy the
+-// last "keep" bytes to offset 0 and truncate the rest. Since we could
+-// be racing with other writers, this approach has the potential to
+-// lose very small amounts of data. For security, only follow symlinks
+-// if the path is /proc/self/fd/*
+-GLOG_EXPORT void TruncateLogFile(const char* path, uint64 limit, uint64 keep);
+-
+-// Truncate stdout and stderr if they are over the value specified by
+-// --max_log_size; keep the final 1MB. This function has the same
+-// race condition as TruncateLogFile.
+-GLOG_EXPORT void TruncateStdoutStderr();
+-
+-// Return the string representation of the provided LogSeverity level.
+-// Thread-safe.
+-GLOG_EXPORT const char* GetLogSeverityName(LogSeverity severity);
+-
+-// ---------------------------------------------------------------------
+-// Implementation details that are not useful to most clients
+-// ---------------------------------------------------------------------
+-
+-// A Logger is the interface used by logging modules to emit entries
+-// to a log. A typical implementation will dump formatted data to a
+-// sequence of files. We also provide interfaces that will forward
+-// the data to another thread so that the invoker never blocks.
+-// Implementations should be thread-safe since the logging system
+-// will write to them from multiple threads.
+-
+-namespace base {
+-
+-class GLOG_EXPORT Logger {
+- public:
+- virtual ~Logger();
+-
+- // Writes "message[0,message_len-1]" corresponding to an event that
+- // occurred at "timestamp". If "force_flush" is true, the log file
+- // is flushed immediately.
+- //
+- // The input message has already been formatted as deemed
+- // appropriate by the higher level logging facility. For example,
+- // textual log messages already contain timestamps, and the
+- // file:linenumber header.
+- virtual void Write(bool force_flush,
+- time_t timestamp,
+- const char* message,
+- size_t message_len) = 0;
+-
+- // Flush any buffered messages
+- virtual void Flush() = 0;
+-
+- // Get the current LOG file size.
+- // The returned value is approximate since some
+- // logged data may not have been flushed to disk yet.
+- virtual uint32 LogSize() = 0;
+-};
+-
+-// Get the logger for the specified severity level. The logger
+-// remains the property of the logging module and should not be
+-// deleted by the caller. Thread-safe.
+-extern GLOG_EXPORT Logger* GetLogger(LogSeverity level);
+-
+-// Set the logger for the specified severity level. The logger
+-// becomes the property of the logging module and should not
+-// be deleted by the caller. Thread-safe.
+-extern GLOG_EXPORT void SetLogger(LogSeverity level, Logger* logger);
+-
+-}
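For the Logger interface above, a minimal sketch of a custom implementation and its installation (illustrative; BufferLogger is a made-up name, and google::uint32 is assumed to be glog's integer typedef):

    #include <ctime>
    #include <string>
    #include <glog/logging.h>

    // Illustrative only: accumulates formatted log lines in memory.
    class BufferLogger : public google::base::Logger {
     public:
      void Write(bool /*force_flush*/, time_t /*timestamp*/,
                 const char* message, size_t message_len) override {
        buffer_.append(message, message_len);
      }
      void Flush() override {}
      google::uint32 LogSize() override {
        return static_cast<google::uint32>(buffer_.size());
      }

     private:
      std::string buffer_;
    };

    // The logger becomes the logging module's property once installed:
    //   google::base::SetLogger(google::GLOG_INFO, new BufferLogger);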
+-
+-// glibc has traditionally implemented two incompatible versions of
+-// strerror_r(). There is a poorly defined convention for picking the
+-// version that we want, but it is not clear whether it even works with
+-// all versions of glibc.
+-// So, instead, we provide this wrapper that automatically detects the
+-// version that is in use, and then implements POSIX semantics.
+-// N.B. In addition to what POSIX says, we also guarantee that "buf" will
+-// be set to an empty string if this function fails. This means, in most
+-// cases, you do not need to check the error code and you can directly
+-// use the value of "buf". It will never have an undefined value.
+-// DEPRECATED: Use StrError(int) instead.
+-GLOG_EXPORT int posix_strerror_r(int err, char *buf, size_t len);
+-
+-// A thread-safe replacement for strerror(). Returns a string describing the
+-// given POSIX error code.
+-GLOG_EXPORT std::string StrError(int err);
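A small usage sketch for StrError() (illustrative; LogRenameError is a made-up helper):

    #include <cerrno>
    #include <cstdio>
    #include <glog/logging.h>

    void LogRenameError(const char* from, const char* to) {
      if (std::rename(from, to) != 0) {
        // Unlike strerror(), StrError() is safe to call from multiple threads.
        LOG(ERROR) << "rename failed: " << google::StrError(errno);
      }
    }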
+-
+-// A class for which we define operator<<, which does nothing.
+-class GLOG_EXPORT NullStream : public LogMessage::LogStream {
+- public:
+- // Initialize the LogStream so the messages can be written somewhere
+- // (they'll never be actually displayed). This will be needed if a
+- // NullStream& is implicitly converted to LogStream&, in which case
+- // the overloaded NullStream::operator<< will not be invoked.
+- NullStream() : LogMessage::LogStream(message_buffer_, 1, 0) { }
+- NullStream(const char* /*file*/, int /*line*/,
+- const CheckOpString& /*result*/) :
+- LogMessage::LogStream(message_buffer_, 1, 0) { }
+- NullStream &stream() { return *this; }
+- private:
+- // A very short buffer for messages (which we discard anyway). This
+-  // will be needed if NullStream& is converted to LogStream& (e.g. as a
+- // result of a conditional expression).
+- char message_buffer_[2];
+-};
+-
+-// Do nothing. This operator is inline, allowing the message to be
+-// compiled away. The message will not be compiled away if we do
+-// something like (flag ? LOG(INFO) : LOG(ERROR)) << message; when
+-// SKIP_LOG=WARNING. In those cases, NullStream will be implicitly
+-// converted to LogStream and the message will be computed and then
+-// quietly discarded.
+-template<class T>
+-inline NullStream& operator<<(NullStream &str, const T &) { return str; }
+-
+-// Similar to NullStream, but aborts the program (without stack
+-// trace), like LogMessageFatal.
+-class GLOG_EXPORT NullStreamFatal : public NullStream {
+- public:
+- NullStreamFatal() { }
+- NullStreamFatal(const char* file, int line, const CheckOpString& result) :
+- NullStream(file, line, result) { }
+-#if defined(_MSC_VER)
+-#pragma warning(push)
+-#pragma warning(disable : 4722)
+-#endif // _MSC_VER
+- __attribute__((noreturn)) ~NullStreamFatal() throw () { _exit(EXIT_FAILURE); }
+-#if defined(_MSC_VER)
+-#pragma warning(pop)
+-#endif // _MSC_VER
+-};
+-
+-// Install a signal handler that will dump signal information and a stack
+-// trace when the program crashes on certain signals. We'll install the
+-// signal handler for the following signals.
+-//
+-// SIGSEGV, SIGILL, SIGFPE, SIGABRT, SIGBUS, and SIGTERM.
+-//
+-// By default, the signal handler will write the failure dump to the
+-// standard error. You can customize the destination by installing your
+-// own writer function by InstallFailureWriter() below.
+-//
+-// Note on threading:
+-//
+-// The function should be called before threads are created, if you want
+-// to use the failure signal handler for all threads. The stack trace
+-// will be shown only for the thread that receives the signal. In other
+-// words, stack traces of other threads won't be shown.
+-GLOG_EXPORT void InstallFailureSignalHandler();
+-
+-// Installs a function that is used for writing the failure dump. "data"
+-// is the pointer to the beginning of a message to be written, and "size"
+-// is the size of the message. You should not expect the data to be
+-// terminated with '\0'.
+-GLOG_EXPORT void InstallFailureWriter(
+- void (*writer)(const char* data, size_t size));
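Typical wiring of the two hooks above (a sketch only; the writer just forwards to stderr, and error handling is omitted):

    #include <unistd.h>
    #include <glog/logging.h>

    // Illustrative only: "data" is not NUL-terminated, so write exactly "size" bytes.
    static void WriteFailureDump(const char* data, size_t size) {
      ssize_t ignored = write(STDERR_FILENO, data, size);
      (void)ignored;
    }

    int main(int /*argc*/, char** argv) {
      google::InitGoogleLogging(argv[0]);
      // Install before any threads are spawned so all threads are covered.
      google::InstallFailureSignalHandler();
      google::InstallFailureWriter(&WriteFailureDump);
      LOG(INFO) << "running";
      return 0;
    }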
+-
+-}
+-
+-#pragma pop_macro("DECLARE_VARIABLE")
+-#pragma pop_macro("DECLARE_bool")
+-#pragma pop_macro("DECLARE_string")
+-#pragma pop_macro("DECLARE_int32")
+-#pragma pop_macro("DECLARE_uint32")
++// Not needed in Chrome.
+
+ #endif // GLOG_LOGGING_H
+diff --git a/base/third_party/symbolize/glog/raw_logging.h b/base/third_party/symbolize/glog/raw_logging.h
+index eda5fb456aee1..74619b9a0be7d 100644
+--- a/base/third_party/symbolize/glog/raw_logging.h
++++ b/base/third_party/symbolize/glog/raw_logging.h
+@@ -36,144 +36,6 @@
+ #ifndef GLOG_RAW_LOGGING_H
+ #define GLOG_RAW_LOGGING_H
+
+-#include <ctime>
+-
+-namespace google {
+-
+-#include <glog/log_severity.h>
+-#include <glog/logging.h>
+-#include <glog/vlog_is_on.h>
+-
+-#if defined(__GNUC__)
+-#pragma GCC diagnostic push
+-#pragma GCC diagnostic ignored "-Wvariadic-macros"
+-#endif
+-
+-// This is similar to LOG(severity) << format... and VLOG(level) << format..,
+-// but
+-// * it is to be used ONLY by low-level modules that can't use normal LOG()
+-//  * it is designed to be a low-level logger that does not allocate any
+-// memory and does not need any locks, hence:
+-// * it logs straight and ONLY to STDERR w/o buffering
+-// * it uses an explicit format and arguments list
+-// * it will silently chop off really long message strings
+-// Usage example:
+-// RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+-// RAW_VLOG(3, "status is %i", status);
+-// These will print almost-standard log lines like this to stderr only:
+-// E20200821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+-// I20200821 211317 file.cc:142] RAW: status is 20
+-#define RAW_LOG(severity, ...) \
+- do { \
+- switch (google::GLOG_ ## severity) { \
+- case 0: \
+- RAW_LOG_INFO(__VA_ARGS__); \
+- break; \
+- case 1: \
+- RAW_LOG_WARNING(__VA_ARGS__); \
+- break; \
+- case 2: \
+- RAW_LOG_ERROR(__VA_ARGS__); \
+- break; \
+- case 3: \
+- RAW_LOG_FATAL(__VA_ARGS__); \
+- break; \
+- default: \
+- break; \
+- } \
+- } while (0)
+-
+-// The following STRIP_LOG testing is performed in the header file so that it's
+-// possible to completely compile out the logging code and the log messages.
+-#if !defined(STRIP_LOG) || STRIP_LOG == 0
+-#define RAW_VLOG(verboselevel, ...) \
+- do { \
+- if (VLOG_IS_ON(verboselevel)) { \
+- RAW_LOG_INFO(__VA_ARGS__); \
+- } \
+- } while (0)
+-#else
+-#define RAW_VLOG(verboselevel, ...) RawLogStub__(0, __VA_ARGS__)
+-#endif // STRIP_LOG == 0
+-
+-#if !defined(STRIP_LOG) || STRIP_LOG == 0
+-#define RAW_LOG_INFO(...) google::RawLog__(google::GLOG_INFO, \
+- __FILE__, __LINE__, __VA_ARGS__)
+-#else
+-#define RAW_LOG_INFO(...) google::RawLogStub__(0, __VA_ARGS__)
+-#endif // STRIP_LOG == 0
+-
+-#if !defined(STRIP_LOG) || STRIP_LOG <= 1
+-#define RAW_LOG_WARNING(...) google::RawLog__(google::GLOG_WARNING, \
+- __FILE__, __LINE__, __VA_ARGS__)
+-#else
+-#define RAW_LOG_WARNING(...) google::RawLogStub__(0, __VA_ARGS__)
+-#endif // STRIP_LOG <= 1
+-
+-#if !defined(STRIP_LOG) || STRIP_LOG <= 2
+-#define RAW_LOG_ERROR(...) google::RawLog__(google::GLOG_ERROR, \
+- __FILE__, __LINE__, __VA_ARGS__)
+-#else
+-#define RAW_LOG_ERROR(...) google::RawLogStub__(0, __VA_ARGS__)
+-#endif // STRIP_LOG <= 2
+-
+-#if !defined(STRIP_LOG) || STRIP_LOG <= 3
+-#define RAW_LOG_FATAL(...) google::RawLog__(google::GLOG_FATAL, \
+- __FILE__, __LINE__, __VA_ARGS__)
+-#else
+-#define RAW_LOG_FATAL(...) \
+- do { \
+- google::RawLogStub__(0, __VA_ARGS__); \
+- exit(EXIT_FAILURE); \
+- } while (0)
+-#endif // STRIP_LOG <= 3
+-
+-// Similar to CHECK(condition) << message,
+-// but for low-level modules: we use only RAW_LOG that does not allocate memory.
+-// We do not want to provide an args list here, to encourage this usage:
+-// if (!cond) RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+-// so that the args are not computed when not needed.
+-#define RAW_CHECK(condition, message) \
+- do { \
+- if (!(condition)) { \
+- RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+- } \
+- } while (0)
+-
+-// Debug versions of RAW_LOG and RAW_CHECK
+-#ifndef NDEBUG
+-
+-#define RAW_DLOG(severity, ...) RAW_LOG(severity, __VA_ARGS__)
+-#define RAW_DCHECK(condition, message) RAW_CHECK(condition, message)
+-
+-#else // NDEBUG
+-
+-#define RAW_DLOG(severity, ...) \
+- while (false) \
+- RAW_LOG(severity, __VA_ARGS__)
+-#define RAW_DCHECK(condition, message) \
+- while (false) \
+- RAW_CHECK(condition, message)
+-
+-#endif // NDEBUG
+-
+-#if defined(__GNUC__)
+-#pragma GCC diagnostic pop
+-#endif
+-
+-// Stub log function used to work around unused variable warnings when
+-// building with STRIP_LOG > 0.
+-static inline void RawLogStub__(int /* ignored */, ...) {
+-}
+-
+-// Helper function to implement RAW_LOG and RAW_VLOG
+-// Logs format... at "severity" level, reporting it
+-// as called from file:line.
+-// This does not allocate memory or acquire locks.
+-GLOG_EXPORT void RawLog__(LogSeverity severity, const char* file, int line,
+- const char* format, ...)
+- __attribute__((__format__(__printf__, 4, 5)));
+-
+-}
++#define RAW_LOG(...) // Do nothing.
+
+ #endif // GLOG_RAW_LOGGING_H
diff --git a/base/third_party/symbolize/patches/003-minimal-utilities.patch b/base/third_party/symbolize/patches/003-minimal-utilities.patch
new file mode 100644
index 0000000..53c65a43
--- /dev/null
+++ b/base/third_party/symbolize/patches/003-minimal-utilities.patch
@@ -0,0 +1,184 @@
+diff --git a/base/third_party/symbolize/utilities.h b/base/third_party/symbolize/utilities.h
+index efa3a8d99856e..8c61380fad159 100644
+--- a/base/third_party/symbolize/utilities.h
++++ b/base/third_party/symbolize/utilities.h
+@@ -34,102 +34,6 @@
+ #ifndef UTILITIES_H__
+ #define UTILITIES_H__
+
+-// printf macros for size_t, in the style of inttypes.h
+-#ifdef _LP64
+-#define __PRIS_PREFIX "z"
+-#else
+-#define __PRIS_PREFIX
+-#endif
+-
+-// Use these macros after a % in a printf format string
+-// to get correct 32/64 bit behavior, like this:
+-// size_t size = records.size();
+-// printf("%"PRIuS"\n", size);
+-
+-#define PRIdS __PRIS_PREFIX "d"
+-#define PRIxS __PRIS_PREFIX "x"
+-#define PRIuS __PRIS_PREFIX "u"
+-#define PRIXS __PRIS_PREFIX "X"
+-#define PRIoS __PRIS_PREFIX "o"
+-
+-#include <string>
+-
+-#include "glog/logging.h"
+-
+-#if defined(GLOG_OS_WINDOWS)
+-# include "port.h"
+-#endif
+-
+-#include "config.h"
+-
+-// There are four different ways we can try to get the stack trace:
+-//
+-// 1) The libunwind library. This is still in development, and as a
+-// separate library adds a new dependency, but doesn't need a frame
+-// pointer. It also doesn't call malloc.
+-//
+-// 2) Our hand-coded stack-unwinder. This depends on a certain stack
+-// layout, which is used by gcc (and those systems using a
+-// gcc-compatible ABI) on x86 systems, at least since gcc 2.95.
+-// It uses the frame pointer to do its work.
+-//
+-// 3) The gdb unwinder -- also the one used by the c++ exception code.
+-// It's obviously well-tested, but has a fatal flaw: it can call
+-// malloc() from the unwinder. This is a problem because we're
+-// trying to use the unwinder to instrument malloc().
+-//
+-// 4) The Windows API CaptureStackBackTrace.
+-//
+-// Note: if you add a new implementation here, make sure it works
+-// correctly when GetStackTrace() is called with max_depth == 0.
+-// Some code may do that.
+-
+-#if defined(HAVE_LIB_UNWIND)
+-# define STACKTRACE_H "stacktrace_libunwind-inl.h"
+-#elif defined(HAVE__UNWIND_BACKTRACE) && defined(HAVE__UNWIND_GETIP)
+-# define STACKTRACE_H "stacktrace_unwind-inl.h"
+-#elif !defined(NO_FRAME_POINTER)
+-# if defined(__i386__) && __GNUC__ >= 2
+-# define STACKTRACE_H "stacktrace_x86-inl.h"
+-# elif (defined(__ppc__) || defined(__PPC__)) && __GNUC__ >= 2
+-# define STACKTRACE_H "stacktrace_powerpc-inl.h"
+-# elif defined(GLOG_OS_WINDOWS)
+-# define STACKTRACE_H "stacktrace_windows-inl.h"
+-# endif
+-#endif
+-
+-#if !defined(STACKTRACE_H) && defined(HAVE_EXECINFO_BACKTRACE)
+-# define STACKTRACE_H "stacktrace_generic-inl.h"
+-#endif
+-
+-#if defined(STACKTRACE_H)
+-# define HAVE_STACKTRACE
+-#endif
+-
+-#ifndef GLOG_NO_SYMBOLIZE_DETECTION
+-#ifndef HAVE_SYMBOLIZE
+-// defined by gcc
+-#if defined(__ELF__) && defined(GLOG_OS_LINUX)
+-# define HAVE_SYMBOLIZE
+-#elif defined(GLOG_OS_MACOSX) && defined(HAVE_DLADDR)
+-// Use dladdr to symbolize.
+-# define HAVE_SYMBOLIZE
+-#elif defined(GLOG_OS_WINDOWS)
+-// Use DbgHelp to symbolize
+-# define HAVE_SYMBOLIZE
+-#endif
+-#endif // !defined(HAVE_SYMBOLIZE)
+-#endif // !defined(GLOG_NO_SYMBOLIZE_DETECTION)
+-
+-#ifndef ARRAYSIZE
+-// There is a better way, but this is good enough for our purpose.
+-# define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+-#endif
+-
+-_START_GOOGLE_NAMESPACE_
+-
+-namespace glog_internal_namespace_ {
+-
+ #ifdef HAVE___ATTRIBUTE__
+ # define ATTRIBUTE_NOINLINE __attribute__ ((noinline))
+ # define HAVE_ATTRIBUTE_NOINLINE
+@@ -140,76 +44,4 @@ namespace glog_internal_namespace_ {
+ # define ATTRIBUTE_NOINLINE
+ #endif
+
+-const char* ProgramInvocationShortName();
+-
+-int64 CycleClock_Now();
+-
+-int64 UsecToCycles(int64 usec);
+-WallTime WallTime_Now();
+-
+-int32 GetMainThreadPid();
+-bool PidHasChanged();
+-
+-pid_t GetTID();
+-
+-const std::string& MyUserName();
+-
+-// Get the part of filepath after the last path separator.
+-// (Doesn't modify filepath, contrary to basename() in libgen.h.)
+-const char* const_basename(const char* filepath);
+-
+-// Wrapper of __sync_val_compare_and_swap. If the GCC extension isn't
+-// defined, we try the CPU specific logics (we only support x86 and
+-// x86_64 for now) first, then use a naive implementation, which has a
+-// race condition.
+-template<typename T>
+-inline T sync_val_compare_and_swap(T* ptr, T oldval, T newval) {
+-#if defined(HAVE___SYNC_VAL_COMPARE_AND_SWAP)
+- return __sync_val_compare_and_swap(ptr, oldval, newval);
+-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+- T ret;
+- __asm__ __volatile__("lock; cmpxchg %1, (%2);"
+- :"=a"(ret)
+-                       // GCC may produce %sil or %dil for
+-                       // constraint "r", but some of Apple's gas
+-                       // doesn't know the 8 bit registers.
+- // We use "q" to avoid these registers.
+- :"q"(newval), "q"(ptr), "a"(oldval)
+- :"memory", "cc");
+- return ret;
+-#else
+- T ret = *ptr;
+- if (ret == oldval) {
+- *ptr = newval;
+- }
+- return ret;
+-#endif
+-}
+-
+-void DumpStackTraceToString(std::string* stacktrace);
+-
+-struct CrashReason {
+- CrashReason() : filename(0), line_number(0), message(0), depth(0) {}
+-
+- const char* filename;
+- int line_number;
+- const char* message;
+-
+- // We'll also store a bit of stack trace context at the time of crash as
+- // it may not be available later on.
+- void* stack[32];
+- int depth;
+-};
+-
+-void SetCrashReason(const CrashReason* r);
+-
+-void InitGoogleLoggingUtilities(const char* argv0);
+-void ShutdownGoogleLoggingUtilities();
+-
+-} // namespace glog_internal_namespace_
+-
+-_END_GOOGLE_NAMESPACE_
+-
+-using namespace GOOGLE_NAMESPACE::glog_internal_namespace_;
+-
+ #endif // UTILITIES_H__
diff --git a/base/third_party/symbolize/patches/004-add-missing-symbolize-header.patch b/base/third_party/symbolize/patches/004-add-missing-symbolize-header.patch
new file mode 100644
index 0000000..8bbddc2
--- /dev/null
+++ b/base/third_party/symbolize/patches/004-add-missing-symbolize-header.patch
@@ -0,0 +1,13 @@
+diff --git a/base/third_party/symbolize/symbolize.cc b/base/third_party/symbolize/symbolize.cc
+index 2cfd4c490cc94..e2f99a647bf88 100644
+--- a/base/third_party/symbolize/symbolize.cc
++++ b/base/third_party/symbolize/symbolize.cc
+@@ -52,6 +52,8 @@
+ #include GLOG_BUILD_CONFIG_INCLUDE
+ #endif // GLOG_BUILD_CONFIG_INCLUDE
+
++#include "symbolize.h"
++
+ #include "utilities.h"
+
+ #if defined(HAVE_SYMBOLIZE)
diff --git a/base/third_party/symbolize/patches/005-expose-file-helpers.patch b/base/third_party/symbolize/patches/005-expose-file-helpers.patch
new file mode 100644
index 0000000..fe768c5
--- /dev/null
+++ b/base/third_party/symbolize/patches/005-expose-file-helpers.patch
@@ -0,0 +1,135 @@
+diff --git a/base/third_party/symbolize/symbolize.cc b/base/third_party/symbolize/symbolize.cc
+index e2f99a647bf88..3c72fd1183f48 100644
+--- a/base/third_party/symbolize/symbolize.cc
++++ b/base/third_party/symbolize/symbolize.cc
+@@ -141,12 +141,16 @@ _END_GOOGLE_NAMESPACE_
+
+ _START_GOOGLE_NAMESPACE_
+
+-// Read up to "count" bytes from "offset" in the file pointed by file
+-// descriptor "fd" into the buffer starting at "buf" while handling short reads
+-// and EINTR. On success, return the number of bytes read. Otherwise, return
+-// -1.
+-static ssize_t ReadFromOffset(const int fd, void *buf, const size_t count,
+- const size_t offset) {
++FileDescriptor::~FileDescriptor() {
++ if (fd_ >= 0) {
++ close(fd_);
++ }
++}
++
++ssize_t ReadFromOffset(const int fd,
++ void* buf,
++ const size_t count,
++ const size_t offset) {
+ SAFE_ASSERT(fd >= 0);
+ SAFE_ASSERT(count <= static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+ char *buf0 = reinterpret_cast<char *>(buf);
+@@ -371,22 +375,6 @@ static bool GetSymbolFromObjectFile(const int fd,
+ }
+
+ namespace {
+-// Thin wrapper around a file descriptor so that the file descriptor
+-// gets closed for sure.
+-struct FileDescriptor {
+- const int fd_;
+- explicit FileDescriptor(int fd) : fd_(fd) {}
+- ~FileDescriptor() {
+- if (fd_ >= 0) {
+- close(fd_);
+- }
+- }
+- int get() { return fd_; }
+-
+- private:
+- FileDescriptor(const FileDescriptor &);
+- void operator=(const FileDescriptor&);
+-};
+
+ // Helper class for reading lines from file.
+ //
+@@ -503,20 +491,11 @@ static char *GetHex(const char *start, const char *end, uint64_t *hex) {
+ return const_cast<char *>(p);
+ }
+
+-// Searches for the object file (from /proc/self/maps) that contains
+-// the specified pc. If found, sets |start_address| to the start address
+-// of where this object file is mapped in memory, sets the module base
+-// address into |base_address|, copies the object file name into
+-// |out_file_name|, and attempts to open the object file. If the object
+-// file is opened successfully, returns the file descriptor. Otherwise,
+-// returns -1. |out_file_name_size| is the size of the file name buffer
+-// (including the null-terminator).
+-static ATTRIBUTE_NOINLINE int
+-OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
+- uint64_t &start_address,
+- uint64_t &base_address,
+- char *out_file_name,
+- size_t out_file_name_size) {
++int OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
++ uint64_t& start_address,
++ uint64_t& base_address,
++ char* out_file_name,
++ size_t out_file_name_size) {
+ int object_fd;
+
+ int maps_fd;
+diff --git a/base/third_party/symbolize/symbolize.h b/base/third_party/symbolize/symbolize.h
+index 5959e579ffc93..11b24fbd06f5c 100644
+--- a/base/third_party/symbolize/symbolize.h
++++ b/base/third_party/symbolize/symbolize.h
+@@ -94,17 +94,54 @@
+
+ _START_GOOGLE_NAMESPACE_
+
++// Read up to "count" bytes from "offset" in the file pointed by file
++// descriptor "fd" into the buffer starting at "buf" while handling short reads
++// and EINTR. On success, return the number of bytes read. Otherwise, return
++// -1.
++ssize_t ReadFromOffset(const int fd,
++ void* buf,
++ const size_t count,
++ const size_t offset);
++
+ // Gets the section header for the given name, if it exists. Returns true on
+ // success. Otherwise, returns false.
+ bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ ElfW(Shdr) *out);
+
++// Searches for the object file (from /proc/self/maps) that contains
++// the specified pc. If found, sets |start_address| to the start address
++// of where this object file is mapped in memory, sets the module base
++// address into |base_address|, copies the object file name into
++// |out_file_name|, and attempts to open the object file. If the object
++// file is opened successfully, returns the file descriptor. Otherwise,
++// returns -1. |out_file_name_size| is the size of the file name buffer
++// (including the null-terminator).
++ATTRIBUTE_NOINLINE int OpenObjectFileContainingPcAndGetStartAddress(
++ uint64_t pc,
++ uint64_t& start_address,
++ uint64_t& base_address,
++ char* out_file_name,
++ size_t out_file_name_size);
++
+ _END_GOOGLE_NAMESPACE_
+
+ #endif /* __ELF__ */
+
+ _START_GOOGLE_NAMESPACE_
+
++// Thin wrapper around a file descriptor so that the file descriptor
++// gets closed for sure.
++struct FileDescriptor {
++ const int fd_;
++ explicit FileDescriptor(int fd) : fd_(fd) {}
++ ~FileDescriptor();
++ int get() { return fd_; }
++
++ private:
++ FileDescriptor(const FileDescriptor &);
++ void operator=(const FileDescriptor&);
++};
++
+ // Restrictions on the callbacks that follow:
+ // - The callbacks must not use heaps but only use stacks.
+ // - The callbacks must be async-signal-safe.
diff --git a/base/third_party/symbolize/patches/006-use-sandbox-hook-for-open-object-file.patch b/base/third_party/symbolize/patches/006-use-sandbox-hook-for-open-object-file.patch
new file mode 100644
index 0000000..0081801
--- /dev/null
+++ b/base/third_party/symbolize/patches/006-use-sandbox-hook-for-open-object-file.patch
@@ -0,0 +1,70 @@
+diff --git a/base/third_party/symbolize/symbolize.cc b/base/third_party/symbolize/symbolize.cc
+index 3c72fd1183f48..20d4d4106d65b 100644
+--- a/base/third_party/symbolize/symbolize.cc
++++ b/base/third_party/symbolize/symbolize.cc
+@@ -491,11 +491,12 @@ static char *GetHex(const char *start, const char *end, uint64_t *hex) {
+ return const_cast<char *>(p);
+ }
+
+-int OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
+- uint64_t& start_address,
+- uint64_t& base_address,
+- char* out_file_name,
+- size_t out_file_name_size) {
++static int OpenObjectFileContainingPcAndGetStartAddressNoHook(
++ uint64_t pc,
++ uint64_t& start_address,
++ uint64_t& base_address,
++ char* out_file_name,
++ size_t out_file_name_size) {
+ int object_fd;
+
+ int maps_fd;
+@@ -645,6 +646,20 @@ int OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
+ }
+ }
+
++int OpenObjectFileContainingPcAndGetStartAddress(
++ uint64_t pc,
++ uint64_t& start_address,
++ uint64_t& base_address,
++ char* out_file_name,
++ size_t out_file_name_size) {
++ if (g_symbolize_open_object_file_callback) {
++ return g_symbolize_open_object_file_callback(
++ pc, start_address, base_address, out_file_name, out_file_name_size);
++ }
++ return OpenObjectFileContainingPcAndGetStartAddressNoHook(
++ pc, start_address, base_address, out_file_name, out_file_name_size);
++}
++
+ // POSIX doesn't define any async-signal safe function for converting
+ // an integer to ASCII. We'll have to define our own version.
+ // itoa_r() converts an (unsigned) integer to ASCII. It returns "buf", if the
+@@ -734,7 +749,6 @@ static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
+ uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
+ uint64_t start_address = 0;
+ uint64_t base_address = 0;
+- int object_fd = -1;
+
+ if (out_size < 1) {
+ return false;
+@@ -742,16 +756,8 @@ static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
+ out[0] = '\0';
+ SafeAppendString("(", out, out_size);
+
+- if (g_symbolize_open_object_file_callback) {
+- object_fd = g_symbolize_open_object_file_callback(pc0, start_address,
+- base_address, out + 1,
+- out_size - 1);
+- } else {
+- object_fd = OpenObjectFileContainingPcAndGetStartAddress(pc0, start_address,
+- base_address,
+- out + 1,
+- out_size - 1);
+- }
++ int object_fd = OpenObjectFileContainingPcAndGetStartAddress(
++ pc0, start_address, base_address, out + 1, out_size - 1);
+
+ FileDescriptor wrapped_object_fd(object_fd);
+
diff --git a/base/third_party/symbolize/patches/007-sys-types-h.patch b/base/third_party/symbolize/patches/007-sys-types-h.patch
new file mode 100644
index 0000000..cc47bd4
--- /dev/null
+++ b/base/third_party/symbolize/patches/007-sys-types-h.patch
@@ -0,0 +1,13 @@
+diff --git a/base/third_party/symbolize/symbolize.h b/base/third_party/symbolize/symbolize.h
+index 2a55c688aedfb..987569fdde67f 100644
+--- a/base/third_party/symbolize/symbolize.h
++++ b/base/third_party/symbolize/symbolize.h
+@@ -54,6 +54,8 @@
+ #ifndef BASE_SYMBOLIZE_H_
+ #define BASE_SYMBOLIZE_H_
+
++#include <sys/types.h> // for ssize_t
++
+ #include "utilities.h"
+ #include "config.h"
+ #include "glog/logging.h"
diff --git a/base/third_party/symbolize/patches/008-include-cstdlib.patch b/base/third_party/symbolize/patches/008-include-cstdlib.patch
new file mode 100644
index 0000000..ffa9edc
--- /dev/null
+++ b/base/third_party/symbolize/patches/008-include-cstdlib.patch
@@ -0,0 +1,12 @@
+diff --git a/base/third_party/symbolize/symbolize.cc b/base/third_party/symbolize/symbolize.cc
+index 5287e5bb11007..a3b8399f411bf 100644
+--- a/base/third_party/symbolize/symbolize.cc
++++ b/base/third_party/symbolize/symbolize.cc
+@@ -59,6 +59,7 @@
+ #if defined(HAVE_SYMBOLIZE)
+
+ #include <cstring>
++#include <cstdlib>
+
+ #include <algorithm>
+ #include <limits>
diff --git a/base/third_party/symbolize/patches/009-clone-absl-demangle.patch b/base/third_party/symbolize/patches/009-clone-absl-demangle.patch
new file mode 100644
index 0000000..3adae98
--- /dev/null
+++ b/base/third_party/symbolize/patches/009-clone-absl-demangle.patch
@@ -0,0 +1,2388 @@
+diff --git a/base/third_party/symbolize/demangle.cc b/base/third_party/symbolize/demangle.cc
+index 9276c5b879a8c..2632646dd4072 100644
+--- a/base/third_party/symbolize/demangle.cc
++++ b/base/third_party/symbolize/demangle.cc
+@@ -34,13 +34,14 @@
+ //
+ // Note that we only have partial C++0x support yet.
+
+-#include <cstdio> // for NULL
+-
+ #include "demangle.h"
+-#include "utilities.h"
+
+ #if defined(GLOG_OS_WINDOWS)
+ #include <dbghelp.h>
++#else
++#include <cstdint>
++#include <cstdio>
++#include <limits>
+ #endif
+
+ _START_GOOGLE_NAMESPACE_
+@@ -49,117 +50,199 @@ _START_GOOGLE_NAMESPACE_
+ typedef struct {
+ const char *abbrev;
+ const char *real_name;
++ // Number of arguments in <expression> context, or 0 if disallowed.
++ int arity;
+ } AbbrevPair;
+
+ // List of operators from Itanium C++ ABI.
+ static const AbbrevPair kOperatorList[] = {
+- { "nw", "new" },
+- { "na", "new[]" },
+- { "dl", "delete" },
+- { "da", "delete[]" },
+- { "ps", "+" },
+- { "ng", "-" },
+- { "ad", "&" },
+- { "de", "*" },
+- { "co", "~" },
+- { "pl", "+" },
+- { "mi", "-" },
+- { "ml", "*" },
+- { "dv", "/" },
+- { "rm", "%" },
+- { "an", "&" },
+- { "or", "|" },
+- { "eo", "^" },
+- { "aS", "=" },
+- { "pL", "+=" },
+- { "mI", "-=" },
+- { "mL", "*=" },
+- { "dV", "/=" },
+- { "rM", "%=" },
+- { "aN", "&=" },
+- { "oR", "|=" },
+- { "eO", "^=" },
+- { "ls", "<<" },
+- { "rs", ">>" },
+- { "lS", "<<=" },
+- { "rS", ">>=" },
+- { "eq", "==" },
+- { "ne", "!=" },
+- { "lt", "<" },
+- { "gt", ">" },
+- { "le", "<=" },
+- { "ge", ">=" },
+- { "nt", "!" },
+- { "aa", "&&" },
+- { "oo", "||" },
+- { "pp", "++" },
+- { "mm", "--" },
+- { "cm", "," },
+- { "pm", "->*" },
+- { "pt", "->" },
+- { "cl", "()" },
+- { "ix", "[]" },
+- { "qu", "?" },
+- { "st", "sizeof" },
+- { "sz", "sizeof" },
+- { NULL, NULL },
++ // New has special syntax (not currently supported).
++ {"nw", "new", 0},
++ {"na", "new[]", 0},
++
++ // Works except that the 'gs' prefix is not supported.
++ {"dl", "delete", 1},
++ {"da", "delete[]", 1},
++
++ {"ps", "+", 1}, // "positive"
++ {"ng", "-", 1}, // "negative"
++ {"ad", "&", 1}, // "address-of"
++ {"de", "*", 1}, // "dereference"
++ {"co", "~", 1},
++
++ {"pl", "+", 2},
++ {"mi", "-", 2},
++ {"ml", "*", 2},
++ {"dv", "/", 2},
++ {"rm", "%", 2},
++ {"an", "&", 2},
++ {"or", "|", 2},
++ {"eo", "^", 2},
++ {"aS", "=", 2},
++ {"pL", "+=", 2},
++ {"mI", "-=", 2},
++ {"mL", "*=", 2},
++ {"dV", "/=", 2},
++ {"rM", "%=", 2},
++ {"aN", "&=", 2},
++ {"oR", "|=", 2},
++ {"eO", "^=", 2},
++ {"ls", "<<", 2},
++ {"rs", ">>", 2},
++ {"lS", "<<=", 2},
++ {"rS", ">>=", 2},
++ {"eq", "==", 2},
++ {"ne", "!=", 2},
++ {"lt", "<", 2},
++ {"gt", ">", 2},
++ {"le", "<=", 2},
++ {"ge", ">=", 2},
++ {"nt", "!", 1},
++ {"aa", "&&", 2},
++ {"oo", "||", 2},
++ {"pp", "++", 1},
++ {"mm", "--", 1},
++ {"cm", ",", 2},
++ {"pm", "->*", 2},
++ {"pt", "->", 0}, // Special syntax
++ {"cl", "()", 0}, // Special syntax
++ {"ix", "[]", 2},
++ {"qu", "?", 3},
++ {"st", "sizeof", 0}, // Special syntax
++ {"sz", "sizeof", 1}, // Not a real operator name, but used in expressions.
++ {nullptr, nullptr, 0},
+ };
+
+ // List of builtin types from Itanium C++ ABI.
++//
++// Invariant: only one- or two-character type abbreviations here.
+ static const AbbrevPair kBuiltinTypeList[] = {
+- { "v", "void" },
+- { "w", "wchar_t" },
+- { "b", "bool" },
+- { "c", "char" },
+- { "a", "signed char" },
+- { "h", "unsigned char" },
+- { "s", "short" },
+- { "t", "unsigned short" },
+- { "i", "int" },
+- { "j", "unsigned int" },
+- { "l", "long" },
+- { "m", "unsigned long" },
+- { "x", "long long" },
+- { "y", "unsigned long long" },
+- { "n", "__int128" },
+- { "o", "unsigned __int128" },
+- { "f", "float" },
+- { "d", "double" },
+- { "e", "long double" },
+- { "g", "__float128" },
+- { "z", "ellipsis" },
+- { NULL, NULL }
++ {"v", "void", 0},
++ {"w", "wchar_t", 0},
++ {"b", "bool", 0},
++ {"c", "char", 0},
++ {"a", "signed char", 0},
++ {"h", "unsigned char", 0},
++ {"s", "short", 0},
++ {"t", "unsigned short", 0},
++ {"i", "int", 0},
++ {"j", "unsigned int", 0},
++ {"l", "long", 0},
++ {"m", "unsigned long", 0},
++ {"x", "long long", 0},
++ {"y", "unsigned long long", 0},
++ {"n", "__int128", 0},
++ {"o", "unsigned __int128", 0},
++ {"f", "float", 0},
++ {"d", "double", 0},
++ {"e", "long double", 0},
++ {"g", "__float128", 0},
++ {"z", "ellipsis", 0},
++
++ {"De", "decimal128", 0}, // IEEE 754r decimal floating point (128 bits)
++ {"Dd", "decimal64", 0}, // IEEE 754r decimal floating point (64 bits)
++ {"Dc", "decltype(auto)", 0},
++ {"Da", "auto", 0},
++ {"Dn", "std::nullptr_t", 0}, // i.e., decltype(nullptr)
++ {"Df", "decimal32", 0}, // IEEE 754r decimal floating point (32 bits)
++ {"Di", "char32_t", 0},
++ {"Du", "char8_t", 0},
++ {"Ds", "char16_t", 0},
++ {"Dh", "float16", 0}, // IEEE 754r half-precision float (16 bits)
++ {nullptr, nullptr, 0},
+ };
+
+ // List of substitutions Itanium C++ ABI.
+ static const AbbrevPair kSubstitutionList[] = {
+- { "St", "" },
+- { "Sa", "allocator" },
+- { "Sb", "basic_string" },
+- // std::basic_string<char, std::char_traits<char>,std::allocator<char> >
+- { "Ss", "string"},
+- // std::basic_istream<char, std::char_traits<char> >
+- { "Si", "istream" },
+- // std::basic_ostream<char, std::char_traits<char> >
+- { "So", "ostream" },
+- // std::basic_iostream<char, std::char_traits<char> >
+- { "Sd", "iostream" },
+- { NULL, NULL }
++ {"St", "", 0},
++ {"Sa", "allocator", 0},
++ {"Sb", "basic_string", 0},
++ // std::basic_string<char, std::char_traits<char>,std::allocator<char> >
++ {"Ss", "string", 0},
++ // std::basic_istream<char, std::char_traits<char> >
++ {"Si", "istream", 0},
++ // std::basic_ostream<char, std::char_traits<char> >
++ {"So", "ostream", 0},
++ // std::basic_iostream<char, std::char_traits<char> >
++ {"Sd", "iostream", 0},
++ {nullptr, nullptr, 0},
+ };
+
+-// State needed for demangling.
++// State needed for demangling. This struct is copied in almost every stack
++// frame, so every byte counts.
++typedef struct {
++ int mangled_idx; // Cursor of mangled name.
++ int out_cur_idx; // Cursor of output string.
++ int prev_name_idx; // For constructors/destructors.
++ unsigned int prev_name_length : 16; // For constructors/destructors.
++ signed int nest_level : 15; // For nested names.
++ unsigned int append : 1; // Append flag.
++ // Note: for some reason MSVC can't pack "bool append : 1" into the same int
++ // with the above two fields, so we use an int instead. Amusingly it can pack
++ // "signed bool" as expected, but relying on that to continue to be a legal
++ // type seems ill-advised (as it's illegal in at least clang).
++} ParseState;
++
++static_assert(sizeof(ParseState) == 4 * sizeof(int),
++ "unexpected size of ParseState");
++
++// One-off state for demangling that's not subject to backtracking -- either
++// constant data, data that's intentionally immune to backtracking (steps), or
++// data that would never be changed by backtracking anyway (recursion_depth).
++//
++// Only one copy of this exists for each call to Demangle, so the size of this
++// struct is nearly inconsequential.
+ typedef struct {
+- const char *mangled_cur; // Cursor of mangled name.
+- char *out_cur; // Cursor of output string.
+- const char *out_begin; // Beginning of output string.
+- const char *out_end; // End of output string.
+- const char *prev_name; // For constructors/destructors.
+- ssize_t prev_name_length; // For constructors/destructors.
+- short nest_level; // For nested names.
+- bool append; // Append flag.
+- bool overflowed; // True if output gets overflowed.
++ const char *mangled_begin; // Beginning of input string.
++ char *out; // Beginning of output string.
++ int out_end_idx; // One past last allowed output character.
++ int recursion_depth; // For stack exhaustion prevention.
++ int steps; // Cap how much work we'll do, regardless of depth.
++ ParseState parse_state; // Backtrackable state copied for most frames.
+ } State;
+
++namespace {
++// Prevent deep recursion / stack exhaustion.
++// Also prevent unbounded handling of complex inputs.
++class ComplexityGuard {
++ public:
++ explicit ComplexityGuard(State *state) : state_(state) {
++ ++state->recursion_depth;
++ ++state->steps;
++ }
++ ~ComplexityGuard() { --state_->recursion_depth; }
++
++ // 256 levels of recursion seems like a reasonable upper limit on depth.
++  // 128 is not enough to demangle synthetic tests from demangle_unittest.txt:
++ // "_ZaaZZZZ..." and "_ZaaZcvZcvZ..."
++ static constexpr int kRecursionDepthLimit = 256;
++
++ // We're trying to pick a charitable upper-limit on how many parse steps are
++ // necessary to handle something that a human could actually make use of.
++ // This is mostly in place as a bound on how much work we'll do if we are
++  // asked to demangle a mangled name from an untrusted source, so it should be
++ // much larger than the largest expected symbol, but much smaller than the
++ // amount of work we can do in, e.g., a second.
++ //
++ // Some real-world symbols from an arbitrary binary started failing between
++ // 2^12 and 2^13, so we multiply the latter by an extra factor of 16 to set
++ // the limit.
++ //
++ // Spending one second on 2^17 parse steps would require each step to take
++ // 7.6us, or ~30000 clock cycles, so it's safe to say this can be done in
++ // under a second.
++ static constexpr int kParseStepsLimit = 1 << 17;
++
++ bool IsTooComplex() const {
++ return state_->recursion_depth > kRecursionDepthLimit ||
++ state_->steps > kParseStepsLimit;
++ }
++
++ private:
++ State *state_;
++};
++} // namespace
++
+ // We don't use strlen() in libc since it's not guaranteed to be async
+ // signal safe.
+ static size_t StrLen(const char *str) {
+@@ -172,8 +255,8 @@ static size_t StrLen(const char *str) {
+ }
+
+ // Returns true if "str" has at least "n" characters remaining.
+-static bool AtLeastNumCharsRemaining(const char *str, ssize_t n) {
+- for (ssize_t i = 0; i < n; ++i) {
++static bool AtLeastNumCharsRemaining(const char *str, size_t n) {
++ for (size_t i = 0; i < n; ++i) {
+ if (str[i] == '\0') {
+ return false;
+ }
+@@ -184,32 +267,42 @@ static bool AtLeastNumCharsRemaining(const char *str, ssize_t n) {
+ // Returns true if "str" has "prefix" as a prefix.
+ static bool StrPrefix(const char *str, const char *prefix) {
+ size_t i = 0;
+- while (str[i] != '\0' && prefix[i] != '\0' &&
+- str[i] == prefix[i]) {
++ while (str[i] != '\0' && prefix[i] != '\0' && str[i] == prefix[i]) {
+ ++i;
+ }
+ return prefix[i] == '\0'; // Consumed everything in "prefix".
+ }
+
+-static void InitState(State *state, const char *mangled,
+- char *out, size_t out_size) {
+- state->mangled_cur = mangled;
+- state->out_cur = out;
+- state->out_begin = out;
+- state->out_end = out + out_size;
+- state->prev_name = NULL;
+- state->prev_name_length = -1;
+- state->nest_level = -1;
+- state->append = true;
+- state->overflowed = false;
++static void InitState(State* state,
++ const char* mangled,
++ char* out,
++ size_t out_size) {
++ state->mangled_begin = mangled;
++ state->out = out;
++ state->out_end_idx = static_cast<int>(out_size);
++ state->recursion_depth = 0;
++ state->steps = 0;
++
++ state->parse_state.mangled_idx = 0;
++ state->parse_state.out_cur_idx = 0;
++ state->parse_state.prev_name_idx = 0;
++ state->parse_state.prev_name_length = 0;
++ state->parse_state.nest_level = -1;
++ state->parse_state.append = true;
++}
++
++static inline const char *RemainingInput(State *state) {
++ return &state->mangled_begin[state->parse_state.mangled_idx];
+ }
+
+-// Returns true and advances "mangled_cur" if we find "one_char_token"
+-// at "mangled_cur" position. It is assumed that "one_char_token" does
++// Returns true and advances "mangled_idx" if we find "one_char_token"
++// at "mangled_idx" position. It is assumed that "one_char_token" does
+ // not contain '\0'.
+ static bool ParseOneCharToken(State *state, const char one_char_token) {
+- if (state->mangled_cur[0] == one_char_token) {
+- ++state->mangled_cur;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ if (RemainingInput(state)[0] == one_char_token) {
++ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ return false;
+@@ -219,9 +312,11 @@ static bool ParseOneCharToken(State *state, const char one_char_token) {
+ // at "mangled_cur" position. It is assumed that "two_char_token" does
+ // not contain '\0'.
+ static bool ParseTwoCharToken(State *state, const char *two_char_token) {
+- if (state->mangled_cur[0] == two_char_token[0] &&
+- state->mangled_cur[1] == two_char_token[1]) {
+- state->mangled_cur += 2;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ if (RemainingInput(state)[0] == two_char_token[0] &&
++ RemainingInput(state)[1] == two_char_token[1]) {
++ state->parse_state.mangled_idx += 2;
+ return true;
+ }
+ return false;
+@@ -230,21 +325,35 @@ static bool ParseTwoCharToken(State *state, const char *two_char_token) {
+ // Returns true and advances "mangled_cur" if we find any character in
+ // "char_class" at "mangled_cur" position.
+ static bool ParseCharClass(State *state, const char *char_class) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ if (RemainingInput(state)[0] == '\0') {
++ return false;
++ }
+ const char *p = char_class;
+ for (; *p != '\0'; ++p) {
+- if (state->mangled_cur[0] == *p) {
+- ++state->mangled_cur;
++ if (RemainingInput(state)[0] == *p) {
++ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ }
+ return false;
+ }
+
+-// This function is used for handling an optional non-terminal.
+-static bool Optional(bool) {
+- return true;
++static bool ParseDigit(State *state, int *digit) {
++ char c = RemainingInput(state)[0];
++ if (ParseCharClass(state, "0123456789")) {
++ if (digit != nullptr) {
++ *digit = c - '0';
++ }
++ return true;
++ }
++ return false;
+ }
+
++// This function is used for handling an optional non-terminal.
++static bool Optional(bool /*status*/) { return true; }
++
+ // This function is used for handling <non-terminal>+ syntax.
+ typedef bool (*ParseFunc)(State *);
+ static bool OneOrMore(ParseFunc parse_func, State *state) {
+@@ -266,146 +375,179 @@ static bool ZeroOrMore(ParseFunc parse_func, State *state) {
+ return true;
+ }
+
+-// Append "str" at "out_cur". If there is an overflow, "overflowed"
+-// is set to true for later use. The output string is ensured to
++// Append "str" at "out_cur_idx". If there is an overflow, out_cur_idx is
++// set to out_end_idx+1. The output string is ensured to
+ // always terminate with '\0' as long as there is no overflow.
+-static void Append(State *state, const char * const str, ssize_t length) {
+- for (ssize_t i = 0; i < length; ++i) {
+- if (state->out_cur + 1 < state->out_end) { // +1 for '\0'
+- *state->out_cur = str[i];
+- ++state->out_cur;
++static void Append(State *state, const char *const str, const size_t length) {
++ for (size_t i = 0; i < length; ++i) {
++ if (state->parse_state.out_cur_idx + 1 <
++ state->out_end_idx) { // +1 for '\0'
++ state->out[state->parse_state.out_cur_idx++] = str[i];
+ } else {
+- state->overflowed = true;
++ // signal overflow
++ state->parse_state.out_cur_idx = state->out_end_idx + 1;
+ break;
+ }
+ }
+- if (!state->overflowed) {
+- *state->out_cur = '\0'; // Terminate it with '\0'
++ if (state->parse_state.out_cur_idx < state->out_end_idx) {
++ state->out[state->parse_state.out_cur_idx] =
++ '\0'; // Terminate it with '\0'
+ }
+ }
+
+ // We don't use equivalents in libc to avoid locale issues.
+-static bool IsLower(char c) {
+- return c >= 'a' && c <= 'z';
+-}
++static bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
+
+ static bool IsAlpha(char c) {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+ }
+
+-static bool IsDigit(char c) {
+- return c >= '0' && c <= '9';
+-}
++static bool IsDigit(char c) { return c >= '0' && c <= '9'; }
+
+ // Returns true if "str" is a function clone suffix. These suffixes are used
+-// by GCC 4.5.x and later versions to indicate functions which have been
+-// cloned during optimization. We treat any sequence (.<alpha>+.<digit>+)+ as
+-// a function clone suffix.
++// by GCC 4.5.x and later versions (and our locally-modified version of GCC
++// 4.4.x) to indicate functions which have been cloned during optimization.
++// We treat any sequence (.<alpha>+.<digit>+)+ as a function clone suffix.
++// Additionally, '_' is allowed along with the alphanumeric sequence.
+ static bool IsFunctionCloneSuffix(const char *str) {
+ size_t i = 0;
+ while (str[i] != '\0') {
+- // Consume a single .<alpha>+.<digit>+ sequence.
+- if (str[i] != '.' || !IsAlpha(str[i + 1])) {
+- return false;
++ bool parsed = false;
++ // Consume a single [.<alpha> | _]*[.<digit>]* sequence.
++ if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) {
++ parsed = true;
++ i += 2;
++ while (IsAlpha(str[i]) || str[i] == '_') {
++ ++i;
++ }
+ }
+- i += 2;
+- while (IsAlpha(str[i])) {
+- ++i;
++ if (str[i] == '.' && IsDigit(str[i + 1])) {
++ parsed = true;
++ i += 2;
++ while (IsDigit(str[i])) {
++ ++i;
++ }
+ }
+- if (str[i] != '.' || !IsDigit(str[i + 1])) {
++ if (!parsed)
+ return false;
+- }
+- i += 2;
+- while (IsDigit(str[i])) {
+- ++i;
+- }
+ }
+ return true; // Consumed everything in "str".
+ }
+
++static bool EndsWith(State *state, const char chr) {
++ return state->parse_state.out_cur_idx > 0 &&
++ state->parse_state.out_cur_idx < state->out_end_idx &&
++ chr == state->out[state->parse_state.out_cur_idx - 1];
++}
++
+ // Append "str" with some tweaks, iff "append" state is true.
+-// Returns true so that it can be placed in "if" conditions.
+-static void MaybeAppendWithLength(State *state, const char * const str,
+- ssize_t length) {
+- if (state->append && length > 0) {
++static void MaybeAppendWithLength(State *state, const char *const str,
++ const size_t length) {
++ if (state->parse_state.append && length > 0) {
+ // Append a space if the output buffer ends with '<' and "str"
+ // starts with '<' to avoid <<<.
+- if (str[0] == '<' && state->out_begin < state->out_cur &&
+- state->out_cur[-1] == '<') {
++ if (str[0] == '<' && EndsWith(state, '<')) {
+ Append(state, " ", 1);
+ }
+- // Remember the last identifier name for ctors/dtors.
+- if (IsAlpha(str[0]) || str[0] == '_') {
+- state->prev_name = state->out_cur;
+- state->prev_name_length = length;
++ // Remember the last identifier name for ctors/dtors,
++ // but only if we haven't yet overflown the buffer.
++ if (state->parse_state.out_cur_idx < state->out_end_idx &&
++ (IsAlpha(str[0]) || str[0] == '_')) {
++ state->parse_state.prev_name_idx = state->parse_state.out_cur_idx;
++ state->parse_state.prev_name_length = static_cast<unsigned int>(length);
+ }
+ Append(state, str, length);
+ }
+ }
+
+-// A convenient wrapper arount MaybeAppendWithLength().
+-static bool MaybeAppend(State *state, const char * const str) {
+- if (state->append) {
++// Appends a positive decimal number to the output if appending is enabled.
++static bool MaybeAppendDecimal(State *state, int val) {
++ // Max {32-64}-bit unsigned int is 20 digits.
++ constexpr size_t kMaxLength = 20;
++ char buf[kMaxLength];
++
++ // We can't use itoa or sprintf as neither is specified to be
++ // async-signal-safe.
++ if (state->parse_state.append) {
++ // We can't have a one-before-the-beginning pointer, so instead start with
++ // one-past-the-end and manipulate one character before the pointer.
++ char *p = &buf[kMaxLength];
++ do { // val=0 is the only input that should write a leading zero digit.
++ *--p = static_cast<char>((val % 10) + '0');
++ val /= 10;
++ } while (p > buf && val != 0);
++
++ // 'p' landed on the last character we set. How convenient.
++ Append(state, p, kMaxLength - static_cast<size_t>(p - buf));
++ }
++
++ return true;
++}
++
++// A convenient wrapper around MaybeAppendWithLength().
++// Returns true so that it can be placed in "if" conditions.
++static bool MaybeAppend(State *state, const char *const str) {
++ if (state->parse_state.append) {
+ size_t length = StrLen(str);
+- MaybeAppendWithLength(state, str, static_cast<ssize_t>(length));
++ MaybeAppendWithLength(state, str, length);
+ }
+ return true;
+ }
+
+ // This function is used for handling nested names.
+ static bool EnterNestedName(State *state) {
+- state->nest_level = 0;
++ state->parse_state.nest_level = 0;
+ return true;
+ }
+
+ // This function is used for handling nested names.
+-static bool LeaveNestedName(State *state, short prev_value) {
+- state->nest_level = prev_value;
++static bool LeaveNestedName(State *state, int16_t prev_value) {
++ state->parse_state.nest_level = prev_value;
+ return true;
+ }
+
+ // Disable the append mode not to print function parameters, etc.
+ static bool DisableAppend(State *state) {
+- state->append = false;
++ state->parse_state.append = false;
+ return true;
+ }
+
+ // Restore the append mode to the previous state.
+ static bool RestoreAppend(State *state, bool prev_value) {
+- state->append = prev_value;
++ state->parse_state.append = prev_value;
+ return true;
+ }
+
+ // Increase the nest level for nested names.
+ static void MaybeIncreaseNestLevel(State *state) {
+- if (state->nest_level > -1) {
+- ++state->nest_level;
++ if (state->parse_state.nest_level > -1) {
++ ++state->parse_state.nest_level;
+ }
+ }
+
+ // Appends :: for nested names if necessary.
+ static void MaybeAppendSeparator(State *state) {
+- if (state->nest_level >= 1) {
++ if (state->parse_state.nest_level >= 1) {
+ MaybeAppend(state, "::");
+ }
+ }
+
+ // Cancel the last separator if necessary.
+ static void MaybeCancelLastSeparator(State *state) {
+- if (state->nest_level >= 1 && state->append &&
+- state->out_begin <= state->out_cur - 2) {
+- state->out_cur -= 2;
+- *state->out_cur = '\0';
++ if (state->parse_state.nest_level >= 1 && state->parse_state.append &&
++ state->parse_state.out_cur_idx >= 2) {
++ state->parse_state.out_cur_idx -= 2;
++ state->out[state->parse_state.out_cur_idx] = '\0';
+ }
+ }
+
+ // Returns true if the identifier of the given length pointed to by
+ // "mangled_cur" is anonymous namespace.
+-static bool IdentifierIsAnonymousNamespace(State *state, ssize_t length) {
++static bool IdentifierIsAnonymousNamespace(State *state, size_t length) {
++ // Returns true if "anon_prefix" is a proper prefix of "mangled_cur".
+ static const char anon_prefix[] = "_GLOBAL__N_";
+- return (length > static_cast<ssize_t>(sizeof(anon_prefix)) -
+- 1 && // Should be longer.
+- StrPrefix(state->mangled_cur, anon_prefix));
++ return (length > (sizeof(anon_prefix) - 1) &&
++ StrPrefix(RemainingInput(state), anon_prefix));
+ }
+
+ // Forward declarations of our parsing functions.
+@@ -413,24 +555,24 @@ static bool ParseMangledName(State *state);
+ static bool ParseEncoding(State *state);
+ static bool ParseName(State *state);
+ static bool ParseUnscopedName(State *state);
+-static bool ParseUnscopedTemplateName(State *state);
+ static bool ParseNestedName(State *state);
+ static bool ParsePrefix(State *state);
+ static bool ParseUnqualifiedName(State *state);
+ static bool ParseSourceName(State *state);
+ static bool ParseLocalSourceName(State *state);
++static bool ParseUnnamedTypeName(State *state);
+ static bool ParseNumber(State *state, int *number_out);
+ static bool ParseFloatNumber(State *state);
+ static bool ParseSeqId(State *state);
+-static bool ParseIdentifier(State *state, ssize_t length);
+-static bool ParseAbiTags(State *state);
+-static bool ParseAbiTag(State *state);
+-static bool ParseOperatorName(State *state);
++static bool ParseIdentifier(State *state, size_t length);
++static bool ParseOperatorName(State *state, int *arity);
+ static bool ParseSpecialName(State *state);
+ static bool ParseCallOffset(State *state);
+ static bool ParseNVOffset(State *state);
+ static bool ParseVOffset(State *state);
++static bool ParseAbiTags(State *state);
+ static bool ParseCtorDtorName(State *state);
++static bool ParseDecltype(State *state);
+ static bool ParseType(State *state);
+ static bool ParseCVQualifiers(State *state);
+ static bool ParseBuiltinType(State *state);
+@@ -443,11 +585,15 @@ static bool ParseTemplateParam(State *state);
+ static bool ParseTemplateTemplateParam(State *state);
+ static bool ParseTemplateArgs(State *state);
+ static bool ParseTemplateArg(State *state);
++static bool ParseBaseUnresolvedName(State *state);
++static bool ParseUnresolvedName(State *state);
+ static bool ParseExpression(State *state);
+ static bool ParseExprPrimary(State *state);
++static bool ParseExprCastValue(State *state);
+ static bool ParseLocalName(State *state);
++static bool ParseLocalNameSuffix(State *state);
+ static bool ParseDiscriminator(State *state);
+-static bool ParseSubstitution(State *state);
++static bool ParseSubstitution(State *state, bool accept_std);
+
+ // Implementation note: the following code is a straightforward
+ // translation of the Itanium C++ ABI defined in BNF with a couple of
+@@ -459,11 +605,12 @@ static bool ParseSubstitution(State *state);
+ // - Reorder patterns to give greedier functions precedence
+ // We'll mark "Less greedy than" for these cases in the code
+ //
+-// Each parsing function changes the state and returns true on
+-// success. Otherwise, don't change the state and returns false. To
+-// ensure that the state isn't changed in the latter case, we save the
+-// original state before we call more than one parsing functions
+-// consecutively with &&, and restore the state if unsuccessful. See
++// Each parsing function changes the parse state and returns true on
++// success, or returns false and doesn't change the parse state (note:
++// the parse-steps counter increases regardless of success or failure).
++// To ensure that the parse state isn't changed in the latter case, we
++// save the original state before we call multiple parsing functions
++// consecutively with &&, and restore it if unsuccessful. See
+ // ParseEncoding() as an example of this convention. We follow the
+ // convention throughout the code.
+ //
+@@ -477,10 +624,12 @@ static bool ParseSubstitution(State *state);
+ //
+ // Reference:
+ // - Itanium C++ ABI
+-// <http://www.codesourcery.com/cxx-abi/abi.html#mangling>
++// <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling>
+
+ // <mangled-name> ::= _Z <encoding>
+ static bool ParseMangledName(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
+ }
+
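The convention described in the implementation note above -- copy the cheap-to-copy parse state, attempt a sequence of sub-parsers joined with &&, restore the copy on failure -- is easier to see in isolation. A minimal sketch under assumed names (Cursor, Accept, ParseAB), not the patched code:

#include <cstddef>
#include <cstdio>
#include <string>

// Copy the cursor, run sub-parsers joined with &&, and restore the copy if the
// sequence fails so the caller sees no partial consumption.
struct Cursor { std::size_t pos = 0; };

static bool Accept(const std::string& in, Cursor* c, char want) {
  if (c->pos < in.size() && in[c->pos] == want) { ++c->pos; return true; }
  return false;
}

// Parses "ab" as a unit: either both characters are consumed or neither is.
static bool ParseAB(const std::string& in, Cursor* c) {
  Cursor copy = *c;                              // save
  if (Accept(in, c, 'a') && Accept(in, c, 'b')) return true;
  *c = copy;                                     // restore on failure
  return false;
}

int main() {
  Cursor c;
  printf("%d pos=%zu\n", ParseAB("ac", &c), c.pos);  // 0 pos=0: nothing consumed
  return 0;
}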
+@@ -488,13 +637,18 @@ static bool ParseMangledName(State *state) {
+ // ::= <(data) name>
+ // ::= <special-name>
+ static bool ParseEncoding(State *state) {
+- State copy = *state;
+- if (ParseName(state) && ParseBareFunctionType(state)) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ // Implementing the first two productions together as <name>
++ // [<bare-function-type>] avoids exponential blowup of backtracking.
++ //
++ // Since Optional(...) can't fail, there's no need to copy the state for
++ // backtracking.
++ if (ParseName(state) && Optional(ParseBareFunctionType(state))) {
+ return true;
+ }
+- *state = copy;
+
+- if (ParseName(state) || ParseSpecialName(state)) {
++ if (ParseSpecialName(state)) {
+ return true;
+ }
+ return false;
+@@ -505,60 +659,73 @@ static bool ParseEncoding(State *state) {
+ // ::= <unscoped-name>
+ // ::= <local-name>
+ static bool ParseName(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ if (ParseNestedName(state) || ParseLocalName(state)) {
+ return true;
+ }
+
+- State copy = *state;
+- if (ParseUnscopedTemplateName(state) &&
++ // We reorganize the productions to avoid re-parsing unscoped names.
++ // - Inline <unscoped-template-name> productions:
++ // <name> ::= <substitution> <template-args>
++ // ::= <unscoped-name> <template-args>
++ // ::= <unscoped-name>
++ // - Merge the two productions that start with unscoped-name:
++ // <name> ::= <unscoped-name> [<template-args>]
++
++ ParseState copy = state->parse_state;
++ // "std<...>" isn't a valid name.
++ if (ParseSubstitution(state, /*accept_std=*/false) &&
+ ParseTemplateArgs(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- // Less greedy than <unscoped-template-name> <template-args>.
+- if (ParseUnscopedName(state)) {
+- return true;
+- }
+- return false;
++ // Note there's no need to restore state after this since only the first
++ // subparser can fail.
++ return ParseUnscopedName(state) && Optional(ParseTemplateArgs(state));
+ }
+
+ // <unscoped-name> ::= <unqualified-name>
+ // ::= St <unqualified-name>
+ static bool ParseUnscopedName(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ if (ParseUnqualifiedName(state)) {
+ return true;
+ }
+
+- State copy = *state;
+- if (ParseTwoCharToken(state, "St") &&
+- MaybeAppend(state, "std::") &&
++ ParseState copy = state->parse_state;
++ if (ParseTwoCharToken(state, "St") && MaybeAppend(state, "std::") &&
+ ParseUnqualifiedName(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+-// <unscoped-template-name> ::= <unscoped-name>
+-// ::= <substitution>
+-static bool ParseUnscopedTemplateName(State *state) {
+- return ParseUnscopedName(state) || ParseSubstitution(state);
++// <ref-qualifier> ::= R // lvalue method reference qualifier
++// ::= O // rvalue method reference qualifier
++static inline bool ParseRefQualifier(State *state) {
++ return ParseCharClass(state, "OR");
+ }
+
+-// <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
+-// ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
++// <nested-name> ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix>
++// <unqualified-name> E
++// ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
++// <template-args> E
+ static bool ParseNestedName(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'N') &&
+- EnterNestedName(state) &&
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
+ Optional(ParseCVQualifiers(state)) &&
+- ParsePrefix(state) &&
++ Optional(ParseRefQualifier(state)) && ParsePrefix(state) &&
+ LeaveNestedName(state, copy.nest_level) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+@@ -574,12 +741,15 @@ static bool ParseNestedName(State *state) {
+ // ::= <template-param>
+ // ::= <substitution>
+ static bool ParsePrefix(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ bool has_something = false;
+ while (true) {
+ MaybeAppendSeparator(state);
+ if (ParseTemplateParam(state) ||
+- ParseSubstitution(state) ||
+- ParseUnscopedName(state)) {
++ ParseSubstitution(state, /*accept_std=*/true) ||
++ ParseUnscopedName(state) ||
++ (ParseOneCharToken(state, 'M') && ParseUnnamedTypeName(state))) {
+ has_something = true;
+ MaybeIncreaseNestLevel(state);
+ continue;
+@@ -594,40 +764,112 @@ static bool ParsePrefix(State *state) {
+ return true;
+ }
+
+-// <unqualified-name> ::= <operator-name>
+-// ::= <ctor-dtor-name>
++// <unqualified-name> ::= <operator-name> [<abi-tags>]
++// ::= <ctor-dtor-name> [<abi-tags>]
+ // ::= <source-name> [<abi-tags>]
+ // ::= <local-source-name> [<abi-tags>]
++// ::= <unnamed-type-name> [<abi-tags>]
++//
++// <local-source-name> is a GCC extension; see below.
+ static bool ParseUnqualifiedName(State *state) {
+- return (ParseOperatorName(state) ||
+- ParseCtorDtorName(state) ||
+- (ParseSourceName(state) && Optional(ParseAbiTags(state))) ||
+- (ParseLocalSourceName(state) && Optional(ParseAbiTags(state))));
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ if (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) ||
++ ParseSourceName(state) || ParseLocalSourceName(state) ||
++ ParseUnnamedTypeName(state)) {
++ return ParseAbiTags(state);
++ }
++ return false;
++}
++
++// <abi-tags> ::= <abi-tag> [<abi-tags>]
++// <abi-tag> ::= B <source-name>
++static bool ParseAbiTags(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++
++ while (ParseOneCharToken(state, 'B')) {
++ ParseState copy = state->parse_state;
++ MaybeAppend(state, "[abi:");
++
++ if (!ParseSourceName(state)) {
++ state->parse_state = copy;
++ return false;
++ }
++ MaybeAppend(state, "]");
++ }
++
++ return true;
+ }
+
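As the grammar above shows, each <abi-tag> is 'B' followed by a length-prefixed <source-name>, and the demangler renders the tag between "[abi:" and "]" right after the name it decorates; for instance, a tag mangled as "B5cxx11" comes out as "[abi:cxx11]". A tiny illustrative formatter (RenderAbiTag is hypothetical, not the patch's code path):

#include <cstdio>
#include <string>

// Renders one ABI tag the same way ParseAbiTags does: "[abi:" + name + "]".
static std::string RenderAbiTag(const std::string& tag_name) {
  return "[abi:" + tag_name + "]";
}

int main() {
  printf("f%s()\n", RenderAbiTag("cxx11").c_str());  // prints f[abi:cxx11]()
  return 0;
}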
+ // <source-name> ::= <positive length number> <identifier>
+ static bool ParseSourceName(State *state) {
+- State copy = *state;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
+ int length = -1;
+- if (ParseNumber(state, &length) && ParseIdentifier(state, length)) {
++ if (ParseNumber(state, &length) &&
++ ParseIdentifier(state, static_cast<size_t>(length))) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <local-source-name> ::= L <source-name> [<discriminator>]
+ //
+ // References:
+-// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=31775
+-// http://gcc.gnu.org/viewcvs?view=rev&revision=124467
++// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=31775
++// https://gcc.gnu.org/viewcvs?view=rev&revision=124467
+ static bool ParseLocalSourceName(State *state) {
+- State copy = *state;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
+ Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
++ return false;
++}
++
++// <unnamed-type-name> ::= Ut [<(nonnegative) number>] _
++// ::= <closure-type-name>
++// <closure-type-name> ::= Ul <lambda-sig> E [<(nonnegative) number>] _
++// <lambda-sig> ::= <(parameter) type>+
++static bool ParseUnnamedTypeName(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ // Type's 1-based index n is encoded as { "", n == 1; itoa(n-2), otherwise }.
++ // Optionally parse the encoded value into 'which' and add 2 to get the index.
++ int which = -1;
++
++ // Unnamed type local to function or class.
++ if (ParseTwoCharToken(state, "Ut") && Optional(ParseNumber(state, &which)) &&
++ which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
++ ParseOneCharToken(state, '_')) {
++ MaybeAppend(state, "{unnamed type#");
++ MaybeAppendDecimal(state, 2 + which);
++ MaybeAppend(state, "}");
++ return true;
++ }
++ state->parse_state = copy;
++
++ // Closure type.
++ which = -1;
++ if (ParseTwoCharToken(state, "Ul") && DisableAppend(state) &&
++ OneOrMore(ParseType, state) && RestoreAppend(state, copy.append) &&
++ ParseOneCharToken(state, 'E') && Optional(ParseNumber(state, &which)) &&
++ which <= std::numeric_limits<int>::max() - 2 && // Don't overflow.
++ ParseOneCharToken(state, '_')) {
++ MaybeAppend(state, "{lambda()#");
++ MaybeAppendDecimal(state, 2 + which);
++ MaybeAppend(state, "}");
++ return true;
++ }
++ state->parse_state = copy;
++
+ return false;
+ }
+
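The index arithmetic above is easy to misread: the 1-based index n of an unnamed type is mangled as the empty string when n == 1 and as the decimal value n - 2 otherwise, so the parser adds 2 to the parsed number, with -1 standing in for "no number". A standalone check of that mapping (DecodeUnnamedTypeIndex is a hypothetical name):

#include <cstdio>

// Maps the optionally-parsed value back to the 1-based unnamed-type index.
static int DecodeUnnamedTypeIndex(int parsed_number_or_minus1) {
  return 2 + parsed_number_or_minus1;
}

int main() {
  printf("Ut_  -> {unnamed type#%d}\n", DecodeUnnamedTypeIndex(-1));  // #1
  printf("Ut0_ -> {unnamed type#%d}\n", DecodeUnnamedTypeIndex(0));   // #2
  printf("Ut1_ -> {unnamed type#%d}\n", DecodeUnnamedTypeIndex(1));   // #3
  return 0;
}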
+@@ -635,23 +877,32 @@ static bool ParseLocalSourceName(State *state) {
+ // If "number_out" is non-null, then *number_out is set to the value of the
+ // parsed number on success.
+ static bool ParseNumber(State *state, int *number_out) {
+- int sign = 1;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ bool negative = false;
+ if (ParseOneCharToken(state, 'n')) {
+- sign = -1;
++ negative = true;
+ }
+- const char *p = state->mangled_cur;
+- int number = 0;
+- for (;*p != '\0'; ++p) {
++ const char *p = RemainingInput(state);
++ uint64_t number = 0;
++ for (; *p != '\0'; ++p) {
+ if (IsDigit(*p)) {
+- number = number * 10 + (*p - '0');
++ number = number * 10 + static_cast<uint64_t>(*p - '0');
+ } else {
+ break;
+ }
+ }
+- if (p != state->mangled_cur) { // Conversion succeeded.
+- state->mangled_cur = p;
+- if (number_out != NULL) {
+- *number_out = number * sign;
++ // Apply the sign with uint64_t arithmetic so overflows aren't UB. Gives
++ // "incorrect" results for out-of-range inputs, but negative values only
++ // appear for literals, which aren't printed.
++ if (negative) {
++ number = ~number + 1;
++ }
++ if (p != RemainingInput(state)) { // Conversion succeeded.
++ state->parse_state.mangled_idx += p - RemainingInput(state);
++ if (number_out != nullptr) {
++ // Note: possibly truncate "number".
++ *number_out = static_cast<int>(number);
+ }
+ return true;
+ }
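The negation above relies on the two's-complement identity ~x + 1 == -x carried out in uint64_t, where wraparound is defined, instead of negating a signed value that may have overflowed. A standalone demonstration of the idea (not the patched function):

#include <cstdint>
#include <cstdio>

// ~x + 1 computed in uint64_t is defined for every input (it wraps modulo
// 2^64), whereas negating an overflowed signed value would be undefined
// behavior. Out-of-range magnitudes just wrap, which is tolerable because
// negative literals are never printed by the demangler.
int main() {
  uint64_t number = 42;
  uint64_t negated = ~number + 1;  // 2^64 - 42
  // Reinterpreting as a signed value yields -42 on two's-complement targets.
  printf("%lld\n", static_cast<long long>(negated));
  return 0;
}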
+@@ -661,14 +912,16 @@ static bool ParseNumber(State *state, int *number_out) {
+ // Floating-point literals are encoded using a fixed-length lowercase
+ // hexadecimal string.
+ static bool ParseFloatNumber(State *state) {
+- const char *p = state->mangled_cur;
+- for (;*p != '\0'; ++p) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ const char *p = RemainingInput(state);
++ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
+ break;
+ }
+ }
+- if (p != state->mangled_cur) { // Conversion succeeded.
+- state->mangled_cur = p;
++ if (p != RemainingInput(state)) { // Conversion succeeded.
++ state->parse_state.mangled_idx += p - RemainingInput(state);
+ return true;
+ }
+ return false;
+@@ -677,93 +930,85 @@ static bool ParseFloatNumber(State *state) {
+ // The <seq-id> is a sequence number in base 36,
+ // using digits and upper case letters
+ static bool ParseSeqId(State *state) {
+- const char *p = state->mangled_cur;
+- for (;*p != '\0'; ++p) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ const char *p = RemainingInput(state);
++ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
+ break;
+ }
+ }
+- if (p != state->mangled_cur) { // Conversion succeeded.
+- state->mangled_cur = p;
++ if (p != RemainingInput(state)) { // Conversion succeeded.
++ state->parse_state.mangled_idx += p - RemainingInput(state);
+ return true;
+ }
+ return false;
+ }
+
+ // <identifier> ::= <unqualified source code identifier> (of given length)
+-static bool ParseIdentifier(State *state, ssize_t length) {
+- if (length == -1 ||
+- !AtLeastNumCharsRemaining(state->mangled_cur, length)) {
++static bool ParseIdentifier(State *state, size_t length) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ if (!AtLeastNumCharsRemaining(RemainingInput(state), length)) {
+ return false;
+ }
+ if (IdentifierIsAnonymousNamespace(state, length)) {
+ MaybeAppend(state, "(anonymous namespace)");
+ } else {
+- MaybeAppendWithLength(state, state->mangled_cur, length);
++ MaybeAppendWithLength(state, RemainingInput(state), length);
+ }
+- state->mangled_cur += length;
++ state->parse_state.mangled_idx += length;
+ return true;
+ }
+
+-// <abi-tags> ::= <abi-tag> [<abi-tags>]
+-static bool ParseAbiTags(State *state) {
+- State copy = *state;
+- DisableAppend(state);
+- if (OneOrMore(ParseAbiTag, state)) {
+- RestoreAppend(state, copy.append);
+- return true;
+- }
+- *state = copy;
+- return false;
+-}
+-
+-// <abi-tag> ::= B <source-name>
+-static bool ParseAbiTag(State *state) {
+- return ParseOneCharToken(state, 'B') && ParseSourceName(state);
+-}
+-
+ // <operator-name> ::= nw, and other two-letter cases
+ // ::= cv <type> # (cast)
+ // ::= v <digit> <source-name> # vendor extended operator
+-static bool ParseOperatorName(State *state) {
+- if (!AtLeastNumCharsRemaining(state->mangled_cur, 2)) {
++static bool ParseOperatorName(State *state, int *arity) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ if (!AtLeastNumCharsRemaining(RemainingInput(state), 2)) {
+ return false;
+ }
+ // First check with "cv" (cast) case.
+- State copy = *state;
+- if (ParseTwoCharToken(state, "cv") &&
+- MaybeAppend(state, "operator ") &&
+- EnterNestedName(state) &&
+- ParseType(state) &&
++ ParseState copy = state->parse_state;
++ if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
++ EnterNestedName(state) && ParseType(state) &&
+ LeaveNestedName(state, copy.nest_level)) {
++ if (arity != nullptr) {
++ *arity = 1;
++ }
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ // Then vendor extended operators.
+- if (ParseOneCharToken(state, 'v') && ParseCharClass(state, "0123456789") &&
++ if (ParseOneCharToken(state, 'v') && ParseDigit(state, arity) &&
+ ParseSourceName(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ // Other operator names should start with a lower alphabet followed
+ // by a lower/upper alphabet.
+- if (!(IsLower(state->mangled_cur[0]) &&
+- IsAlpha(state->mangled_cur[1]))) {
++ if (!(IsLower(RemainingInput(state)[0]) &&
++ IsAlpha(RemainingInput(state)[1]))) {
+ return false;
+ }
+ // We may want to perform a binary search if we really need speed.
+ const AbbrevPair *p;
+- for (p = kOperatorList; p->abbrev != NULL; ++p) {
+- if (state->mangled_cur[0] == p->abbrev[0] &&
+- state->mangled_cur[1] == p->abbrev[1]) {
++ for (p = kOperatorList; p->abbrev != nullptr; ++p) {
++ if (RemainingInput(state)[0] == p->abbrev[0] &&
++ RemainingInput(state)[1] == p->abbrev[1]) {
++ if (arity != nullptr) {
++ *arity = p->arity;
++ }
+ MaybeAppend(state, "operator");
+ if (IsLower(*p->real_name)) { // new, delete, etc.
+ MaybeAppend(state, " ");
+ }
+ MaybeAppend(state, p->real_name);
+- state->mangled_cur += 2;
++ state->parse_state.mangled_idx += 2;
+ return true;
+ }
+ }
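ParseOperatorName now reports each operator's arity so ParseExpression can parse exactly the right number of operands. A minimal sketch of the table-lookup shape (OpEntry and the three sample rows are illustrative; the real kOperatorList in demangle.cc is much longer, though the Itanium abbreviations shown -- "pl" for binary +, "ng" for unary negation, "qu" for ?: -- are standard):

#include <cstdio>

// Two-letter operator lookup keyed on the first two remaining characters.
struct OpEntry { const char* abbrev; const char* name; int arity; };

static const OpEntry kOps[] = {
    {"pl", "+", 2},  // binary operator+
    {"ng", "-", 1},  // unary negation
    {"qu", "?", 3},  // ternary ?:
    {nullptr, nullptr, 0},
};

static const OpEntry* LookupOp(const char* mangled) {
  for (const OpEntry* p = kOps; p->abbrev != nullptr; ++p) {
    if (mangled[0] == p->abbrev[0] && mangled[1] == p->abbrev[1]) return p;
  }
  return nullptr;
}

int main() {
  const OpEntry* e = LookupOp("plxx");  // "pl" followed by arbitrary input
  if (e != nullptr) printf("operator%s, arity %d\n", e->name, e->arity);
  return 0;
}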
+@@ -774,6 +1019,7 @@ static bool ParseOperatorName(State *state) {
+ // ::= TT <type>
+ // ::= TI <type>
+ // ::= TS <type>
++// ::= TH <type> # thread-local
+ // ::= Tc <call-offset> <call-offset> <(base) encoding>
+ // ::= GV <(object) name>
+ // ::= T <call-offset> <(base) encoding>
+@@ -789,123 +1035,156 @@ static bool ParseOperatorName(State *state) {
+ // Note: we don't care much about them since they don't appear in
+ // stack traces. They are special data.
+ static bool ParseSpecialName(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'T') &&
+- ParseCharClass(state, "VTIS") &&
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTISH") &&
+ ParseType(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
+ ParseCallOffset(state) && ParseEncoding(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseTwoCharToken(state, "GV") &&
+- ParseName(state)) {
++ if (ParseTwoCharToken(state, "GV") && ParseName(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
+ ParseEncoding(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ // G++ extensions
+ if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
+- ParseNumber(state, NULL) && ParseOneCharToken(state, '_') &&
+- DisableAppend(state) &&
+- ParseType(state)) {
++ ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
++ DisableAppend(state) && ParseType(state)) {
+ RestoreAppend(state, copy.append);
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
+ ParseType(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
+ ParseCallOffset(state) && ParseEncoding(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <call-offset> ::= h <nv-offset> _
+ // ::= v <v-offset> _
+ static bool ParseCallOffset(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'h') &&
+- ParseNVOffset(state) && ParseOneCharToken(state, '_')) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
++ ParseOneCharToken(state, '_')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseOneCharToken(state, 'v') &&
+- ParseVOffset(state) && ParseOneCharToken(state, '_')) {
++ if (ParseOneCharToken(state, 'v') && ParseVOffset(state) &&
++ ParseOneCharToken(state, '_')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ return false;
+ }
+
+ // <nv-offset> ::= <(offset) number>
+ static bool ParseNVOffset(State *state) {
+- return ParseNumber(state, NULL);
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ return ParseNumber(state, nullptr);
+ }
+
+ // <v-offset> ::= <(offset) number> _ <(virtual offset) number>
+ static bool ParseVOffset(State *state) {
+- State copy = *state;
+- if (ParseNumber(state, NULL) && ParseOneCharToken(state, '_') &&
+- ParseNumber(state, NULL)) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
++ ParseNumber(state, nullptr)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+-// <ctor-dtor-name> ::= C1 | C2 | C3
++// <ctor-dtor-name> ::= C1 | C2 | C3 | CI1 <base-class-type> | CI2
++// <base-class-type>
+ // ::= D0 | D1 | D2
++// # GCC extensions: "unified" constructor/destructor. See
++// #
++// https://github.com/gcc-mirror/gcc/blob/7ad17b583c3643bd4557f29b8391ca7ef08391f5/gcc/cp/mangle.c#L1847
++// ::= C4 | D4
+ static bool ParseCtorDtorName(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'C') &&
+- ParseCharClass(state, "123")) {
+- const char * const prev_name = state->prev_name;
+- const ssize_t prev_name_length = state->prev_name_length;
+- MaybeAppendWithLength(state, prev_name, prev_name_length);
+- return true;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'C')) {
++ if (ParseCharClass(state, "1234")) {
++ const char *const prev_name =
++ state->out + state->parse_state.prev_name_idx;
++ MaybeAppendWithLength(state, prev_name,
++ state->parse_state.prev_name_length);
++ return true;
++ } else if (ParseOneCharToken(state, 'I') && ParseCharClass(state, "12") &&
++ ParseClassEnumType(state)) {
++ return true;
++ }
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseOneCharToken(state, 'D') &&
+- ParseCharClass(state, "012")) {
+- const char * const prev_name = state->prev_name;
+- const ssize_t prev_name_length = state->prev_name_length;
++ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "0124")) {
++ const char *const prev_name = state->out + state->parse_state.prev_name_idx;
+ MaybeAppend(state, "~");
+- MaybeAppendWithLength(state, prev_name, prev_name_length);
++ MaybeAppendWithLength(state, prev_name,
++ state->parse_state.prev_name_length);
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
++ return false;
++}
++
++// <decltype> ::= Dt <expression> E # decltype of an id-expression or class
++// # member access (C++0x)
++// ::= DT <expression> E # decltype of an expression (C++0x)
++static bool ParseDecltype(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
++ ParseExpression(state) && ParseOneCharToken(state, 'E')) {
++ return true;
++ }
++ state->parse_state = copy;
++
+ return false;
+ }
+
+@@ -918,67 +1197,87 @@ static bool ParseCtorDtorName(State *state) {
+ // ::= U <source-name> <type> # vendor extended type qualifier
+ // ::= <builtin-type>
+ // ::= <function-type>
+-// ::= <class-enum-type>
++// ::= <class-enum-type> # note: just an alias for <name>
+ // ::= <array-type>
+ // ::= <pointer-to-member-type>
+ // ::= <template-template-param> <template-args>
+ // ::= <template-param>
++// ::= <decltype>
+ // ::= <substitution>
+ // ::= Dp <type> # pack expansion of (C++0x)
+-// ::= Dt <expression> E # decltype of an id-expression or class
+-// # member access (C++0x)
+-// ::= DT <expression> E # decltype of an expression (C++0x)
++// ::= Dv <num-elems> _ # GNU vector extension
+ //
+ static bool ParseType(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++
+ // We should check CV-qualifiers, and PRGC things first.
+- State copy = *state;
+- if (ParseCVQualifiers(state) && ParseType(state)) {
+- return true;
++ //
++ // CV-qualifiers overlap with some operator names, but an operator name is not
++ // valid as a type. To avoid an ambiguity that can lead to exponential time
++ // complexity, refuse to backtrack the CV-qualifiers.
++ //
++ // _Z4aoeuIrMvvE
++ // => _Z 4aoeuI rM v v E
++ // aoeu<operator%=, void, void>
++ // => _Z 4aoeuI r Mv v E
++ // aoeu<void void::* restrict>
++ //
++ // By consuming the CV-qualifiers first, the former parse is disabled.
++ if (ParseCVQualifiers(state)) {
++ const bool result = ParseType(state);
++ if (!result) state->parse_state = copy;
++ return result;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseCharClass(state, "OPRCG") && ParseType(state)) {
+- return true;
++ // Similarly, these tag characters can overlap with other <name>s resulting in
++ // two different parse prefixes that land on <template-args> in the same
++ // place, such as "C3r1xI...". So, disable the "ctor-name = C3" parse by
++ // refusing to backtrack the tag characters.
++ if (ParseCharClass(state, "OPRCG")) {
++ const bool result = ParseType(state);
++ if (!result) state->parse_state = copy;
++ return result;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
+ return true;
+ }
+- *state = copy;
+-
+- if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
+- ParseExpression(state) && ParseOneCharToken(state, 'E')) {
+- return true;
+- }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
+ ParseType(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseBuiltinType(state) ||
+- ParseFunctionType(state) ||
+- ParseClassEnumType(state) ||
+- ParseArrayType(state) ||
+- ParsePointerToMemberType(state) ||
+- ParseSubstitution(state)) {
++ if (ParseBuiltinType(state) || ParseFunctionType(state) ||
++ ParseClassEnumType(state) || ParseArrayType(state) ||
++ ParsePointerToMemberType(state) || ParseDecltype(state) ||
++ // "std" on its own isn't a type.
++ ParseSubstitution(state, /*accept_std=*/false)) {
+ return true;
+ }
+
+- if (ParseTemplateTemplateParam(state) &&
+- ParseTemplateArgs(state)) {
++ if (ParseTemplateTemplateParam(state) && ParseTemplateArgs(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ // Less greedy than <template-template-param> <template-args>.
+ if (ParseTemplateParam(state)) {
+ return true;
+ }
+
++ if (ParseTwoCharToken(state, "Dv") && ParseNumber(state, nullptr) &&
++ ParseOneCharToken(state, '_')) {
++ return true;
++ }
++ state->parse_state = copy;
++
+ return false;
+ }
+
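The worked example in the comment above (_Z4aoeuIrMvvE) shows why the CV-qualifier and OPRCG prefixes are consumed without backtracking: the same characters must never be re-read under a second interpretation. A standalone sketch of that consume-and-commit pattern (Cursor, AcceptChar, and ParseQualifiedType are hypothetical names):

#include <cstddef>
#include <cstdio>
#include <string>

// Once the ambiguous prefix has been read as a qualifier, the parser finishes
// that interpretation or fails outright; it never retries the prefix as an
// operator name.
struct Cursor { std::size_t pos = 0; };

static bool AcceptChar(const std::string& in, Cursor* c, char want) {
  if (c->pos < in.size() && in[c->pos] == want) { ++c->pos; return true; }
  return false;
}

static bool ParseQualifiedType(const std::string& in, Cursor* c) {
  Cursor copy = *c;
  if (AcceptChar(in, c, 'r')) {              // 'r' = restrict qualifier
    const bool ok = AcceptChar(in, c, 'v');  // stand-in for the nested ParseType
    if (!ok) *c = copy;                      // restore the cursor on failure...
    return ok;                               // ...but never try a second parse of 'r'
  }
  return AcceptChar(in, c, 'v');
}

int main() {
  Cursor c;
  printf("%d\n", ParseQualifiedType("rv", &c));  // 1: restrict-qualified v
  return 0;
}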
+@@ -986,6 +1285,8 @@ static bool ParseType(State *state) {
+ // We don't allow empty <CV-qualifiers> to avoid infinite loop in
+ // ParseType().
+ static bool ParseCVQualifiers(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ int num_cv_qualifiers = 0;
+ num_cv_qualifiers += ParseOneCharToken(state, 'r');
+ num_cv_qualifiers += ParseOneCharToken(state, 'V');
+@@ -993,208 +1294,499 @@ static bool ParseCVQualifiers(State *state) {
+ return num_cv_qualifiers > 0;
+ }
+
+-// <builtin-type> ::= v, etc.
++// <builtin-type> ::= v, etc. # single-character builtin types
+ // ::= u <source-name>
++// ::= Dd, etc. # two-character builtin types
++//
++// Not supported:
++// ::= DF <number> _ # _FloatN (N bits)
++//
+ static bool ParseBuiltinType(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ const AbbrevPair *p;
+- for (p = kBuiltinTypeList; p->abbrev != NULL; ++p) {
+- if (state->mangled_cur[0] == p->abbrev[0]) {
++ for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
++ // Guaranteed only 1- or 2-character strings in kBuiltinTypeList.
++ if (p->abbrev[1] == '\0') {
++ if (ParseOneCharToken(state, p->abbrev[0])) {
++ MaybeAppend(state, p->real_name);
++ return true;
++ }
++ } else if (p->abbrev[2] == '\0' && ParseTwoCharToken(state, p->abbrev)) {
+ MaybeAppend(state, p->real_name);
+- ++state->mangled_cur;
+ return true;
+ }
+ }
+
+- State copy = *state;
++ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+-// <function-type> ::= F [Y] <bare-function-type> E
++// <exception-spec> ::= Do # non-throwing
++// exception-specification (e.g.,
++// noexcept, throw())
++// ::= DO <expression> E # computed (instantiation-dependent)
++// noexcept
++// ::= Dw <type>+ E # dynamic exception specification
++// with instantiation-dependent types
++static bool ParseExceptionSpec(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++
++ if (ParseTwoCharToken(state, "Do")) return true;
++
++ ParseState copy = state->parse_state;
++ if (ParseTwoCharToken(state, "DO") && ParseExpression(state) &&
++ ParseOneCharToken(state, 'E')) {
++ return true;
++ }
++ state->parse_state = copy;
++ if (ParseTwoCharToken(state, "Dw") && OneOrMore(ParseType, state) &&
++ ParseOneCharToken(state, 'E')) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ return false;
++}
++
++// <function-type> ::= [exception-spec] F [Y] <bare-function-type> [O] E
+ static bool ParseFunctionType(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'F') &&
+- Optional(ParseOneCharToken(state, 'Y')) &&
+- ParseBareFunctionType(state) && ParseOneCharToken(state, 'E')) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (Optional(ParseExceptionSpec(state)) && ParseOneCharToken(state, 'F') &&
++ Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
++ Optional(ParseOneCharToken(state, 'O')) &&
++ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <bare-function-type> ::= <(signature) type>+
+ static bool ParseBareFunctionType(State *state) {
+- State copy = *state;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
+ DisableAppend(state);
+ if (OneOrMore(ParseType, state)) {
+ RestoreAppend(state, copy.append);
+ MaybeAppend(state, "()");
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <class-enum-type> ::= <name>
+ static bool ParseClassEnumType(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ return ParseName(state);
+ }
+
+ // <array-type> ::= A <(positive dimension) number> _ <(element) type>
+ // ::= A [<(dimension) expression>] _ <(element) type>
+ static bool ParseArrayType(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'A') && ParseNumber(state, NULL) &&
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'A') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'A') && Optional(ParseExpression(state)) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <pointer-to-member-type> ::= M <(class) type> <(member) type>
+ static bool ParsePointerToMemberType(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'M') && ParseType(state) &&
+- ParseType(state)) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'M') && ParseType(state) && ParseType(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <template-param> ::= T_
+ // ::= T <parameter-2 non-negative number> _
+ static bool ParseTemplateParam(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ if (ParseTwoCharToken(state, "T_")) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+ }
+
+- State copy = *state;
+- if (ParseOneCharToken(state, 'T') && ParseNumber(state, NULL) &&
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+-
+ // <template-template-param> ::= <template-param>
+ // ::= <substitution>
+ static bool ParseTemplateTemplateParam(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ return (ParseTemplateParam(state) ||
+- ParseSubstitution(state));
++ // "std" on its own isn't a template.
++ ParseSubstitution(state, /*accept_std=*/false));
+ }
+
+ // <template-args> ::= I <template-arg>+ E
+ static bool ParseTemplateArgs(State *state) {
+- State copy = *state;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
+ DisableAppend(state);
+- if (ParseOneCharToken(state, 'I') &&
+- OneOrMore(ParseTemplateArg, state) &&
++ if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+ RestoreAppend(state, copy.append);
+ MaybeAppend(state, "<>");
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <template-arg> ::= <type>
+ // ::= <expr-primary>
+-// ::= I <template-arg>* E # argument pack
+ // ::= J <template-arg>* E # argument pack
+ // ::= X <expression> E
+ static bool ParseTemplateArg(State *state) {
+- State copy = *state;
+- if ((ParseOneCharToken(state, 'I') || ParseOneCharToken(state, 'J')) &&
+- ZeroOrMore(ParseTemplateArg, state) &&
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, 'J') && ZeroOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
++
++ // There can be significant overlap between the following leading to
++ // exponential backtracking:
++ //
++ // <expr-primary> ::= L <type> <expr-cast-value> E
++ // e.g. L 2xxIvE 1 E
++ // <type> ==> <local-source-name> <template-args>
++ // e.g. L 2xx IvE
++ //
++ // This means parsing an entire <type> twice, and <type> can contain
++ // <template-arg>, so this can generate exponential backtracking. There is
++ // only overlap when the remaining input starts with "L <source-name>", so
++ // parse all cases that can start this way jointly to share the common prefix.
++ //
++ // We have:
++ //
++ // <template-arg> ::= <type>
++ // ::= <expr-primary>
++ //
++ // First, drop all the productions of <type> that must start with something
++ // other than 'L'. All that's left is <class-enum-type>; inline it.
++ //
++ // <type> ::= <nested-name> # starts with 'N'
++ // ::= <unscoped-name>
++ // ::= <unscoped-template-name> <template-args>
++ // ::= <local-name> # starts with 'Z'
++ //
++ // Drop and inline again:
++ //
++ // <type> ::= <unscoped-name>
++ // ::= <unscoped-name> <template-args>
++ // ::= <substitution> <template-args> # starts with 'S'
++ //
++ // Merge the first two, inline <unscoped-name>, drop last:
++ //
++ // <type> ::= <unqualified-name> [<template-args>]
++ // ::= St <unqualified-name> [<template-args>] # starts with 'S'
++ //
++ // Drop and inline:
++ //
++ // <type> ::= <operator-name> [<template-args>] # starts with lowercase
++ // ::= <ctor-dtor-name> [<template-args>] # starts with 'C' or 'D'
++ // ::= <source-name> [<template-args>] # starts with digit
++ // ::= <local-source-name> [<template-args>]
++ // ::= <unnamed-type-name> [<template-args>] # starts with 'U'
++ //
++ // One more time:
++ //
++ // <type> ::= L <source-name> [<template-args>]
++ //
++ // Likewise with <expr-primary>:
++ //
++ // <expr-primary> ::= L <type> <expr-cast-value> E
++ // ::= LZ <encoding> E # cannot overlap; drop
++ // ::= L <mangled_name> E # cannot overlap; drop
++ //
++ // By similar reasoning as shown above, the only <type>s starting with
++ // <source-name> are "<source-name> [<template-args>]". Inline this.
++ //
++ // <expr-primary> ::= L <source-name> [<template-args>] <expr-cast-value> E
++ //
++ // Now inline both of these into <template-arg>:
++ //
++ // <template-arg> ::= L <source-name> [<template-args>]
++ // ::= L <source-name> [<template-args>] <expr-cast-value> E
++ //
++ // Merge them and we're done:
++ // <template-arg>
++ // ::= L <source-name> [<template-args>] [<expr-cast-value> E]
++ if (ParseLocalSourceName(state) && Optional(ParseTemplateArgs(state))) {
++ copy = state->parse_state;
++ if (ParseExprCastValue(state) && ParseOneCharToken(state, 'E')) {
++ return true;
++ }
++ state->parse_state = copy;
++ return true;
++ }
+
+- if (ParseType(state) ||
+- ParseExprPrimary(state)) {
++ // Now that the overlapping cases can't reach this code, we can safely call
++ // both of these.
++ if (ParseType(state) || ParseExprPrimary(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+-// <expression> ::= <template-param>
+-// ::= <expr-primary>
+-// ::= <unary operator-name> <expression>
+-// ::= <binary operator-name> <expression> <expression>
+-// ::= <trinary operator-name> <expression> <expression>
+-// <expression>
++// <unresolved-type> ::= <template-param> [<template-args>]
++// ::= <decltype>
++// ::= <substitution>
++static inline bool ParseUnresolvedType(State *state) {
++ // No ComplexityGuard because we don't copy the state in this stack frame.
++ return (ParseTemplateParam(state) && Optional(ParseTemplateArgs(state))) ||
++ ParseDecltype(state) || ParseSubstitution(state, /*accept_std=*/false);
++}
++
++// <simple-id> ::= <source-name> [<template-args>]
++static inline bool ParseSimpleId(State *state) {
++ // No ComplexityGuard because we don't copy the state in this stack frame.
++
++ // Note: <simple-id> cannot be followed by a parameter pack; see comment in
++ // ParseUnresolvedType.
++ return ParseSourceName(state) && Optional(ParseTemplateArgs(state));
++}
++
++// <base-unresolved-name> ::= <source-name> [<template-args>]
++// ::= on <operator-name> [<template-args>]
++// ::= dn <destructor-name>
++static bool ParseBaseUnresolvedName(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++
++ if (ParseSimpleId(state)) {
++ return true;
++ }
++
++ ParseState copy = state->parse_state;
++ if (ParseTwoCharToken(state, "on") && ParseOperatorName(state, nullptr) &&
++ Optional(ParseTemplateArgs(state))) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ if (ParseTwoCharToken(state, "dn") &&
++ (ParseUnresolvedType(state) || ParseSimpleId(state))) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ return false;
++}
++
++// <unresolved-name> ::= [gs] <base-unresolved-name>
++// ::= sr <unresolved-type> <base-unresolved-name>
++// ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
++// <base-unresolved-name>
++// ::= [gs] sr <unresolved-qualifier-level>+ E
++// <base-unresolved-name>
++static bool ParseUnresolvedName(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++
++ ParseState copy = state->parse_state;
++ if (Optional(ParseTwoCharToken(state, "gs")) &&
++ ParseBaseUnresolvedName(state)) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ if (ParseTwoCharToken(state, "sr") && ParseUnresolvedType(state) &&
++ ParseBaseUnresolvedName(state)) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ if (ParseTwoCharToken(state, "sr") && ParseOneCharToken(state, 'N') &&
++ ParseUnresolvedType(state) &&
++ OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
++ ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ if (Optional(ParseTwoCharToken(state, "gs")) &&
++ ParseTwoCharToken(state, "sr") &&
++ OneOrMore(/* <unresolved-qualifier-level> ::= */ ParseSimpleId, state) &&
++ ParseOneCharToken(state, 'E') && ParseBaseUnresolvedName(state)) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ return false;
++}
++
++// <expression> ::= <1-ary operator-name> <expression>
++// ::= <2-ary operator-name> <expression> <expression>
++// ::= <3-ary operator-name> <expression> <expression> <expression>
++// ::= cl <expression>+ E
++// ::= cp <simple-id> <expression>* E # Clang-specific.
++// ::= cv <type> <expression> # type (expression)
++// ::= cv <type> _ <expression>* E # type (expr-list)
+ // ::= st <type>
++// ::= <template-param>
++// ::= <function-param>
++// ::= <expr-primary>
++// ::= dt <expression> <unresolved-name> # expr.name
++// ::= pt <expression> <unresolved-name> # expr->name
++// ::= sp <expression> # argument pack expansion
+ // ::= sr <type> <unqualified-name> <template-args>
+ // ::= sr <type> <unqualified-name>
++// <function-param> ::= fp <(top-level) CV-qualifiers> _
++// ::= fp <(top-level) CV-qualifiers> <number> _
++// ::= fL <number> p <(top-level) CV-qualifiers> _
++// ::= fL <number> p <(top-level) CV-qualifiers> <number> _
+ static bool ParseExpression(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
+ return true;
+ }
+
+- State copy = *state;
+- if (ParseOperatorName(state) &&
+- ParseExpression(state) &&
+- ParseExpression(state) &&
+- ParseExpression(state)) {
++ ParseState copy = state->parse_state;
++
++ // Object/function call expression.
++ if (ParseTwoCharToken(state, "cl") && OneOrMore(ParseExpression, state) &&
++ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseOperatorName(state) &&
+- ParseExpression(state) &&
+- ParseExpression(state)) {
++ // Clang-specific "cp <simple-id> <expression>* E"
++ // https://clang.llvm.org/doxygen/ItaniumMangle_8cpp_source.html#l04338
++ if (ParseTwoCharToken(state, "cp") && ParseSimpleId(state) &&
++ ZeroOrMore(ParseExpression, state) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseOperatorName(state) &&
+- ParseExpression(state)) {
++ // Function-param expression (level 0).
++ if (ParseTwoCharToken(state, "fp") && Optional(ParseCVQualifiers(state)) &&
++ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
++ // Function-param expression (level 1+).
++ if (ParseTwoCharToken(state, "fL") && Optional(ParseNumber(state, nullptr)) &&
++ ParseOneCharToken(state, 'p') && Optional(ParseCVQualifiers(state)) &&
++ Optional(ParseNumber(state, nullptr)) && ParseOneCharToken(state, '_')) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ // Parse the conversion expressions jointly to avoid re-parsing the <type> in
++ // their common prefix. Parsed as:
++ // <expression> ::= cv <type> <conversion-args>
++ // <conversion-args> ::= _ <expression>* E
++ // ::= <expression>
++ //
++ // Also don't try ParseOperatorName after seeing "cv", since ParseOperatorName
++ // also needs to accept "cv <type>" in other contexts.
++ if (ParseTwoCharToken(state, "cv")) {
++ if (ParseType(state)) {
++ ParseState copy2 = state->parse_state;
++ if (ParseOneCharToken(state, '_') && ZeroOrMore(ParseExpression, state) &&
++ ParseOneCharToken(state, 'E')) {
++ return true;
++ }
++ state->parse_state = copy2;
++ if (ParseExpression(state)) {
++ return true;
++ }
++ }
++ } else {
++ // Parse unary, binary, and ternary operator expressions jointly, taking
++ // care not to re-parse subexpressions repeatedly. Parse like:
++ // <expression> ::= <operator-name> <expression>
++ // [<one-to-two-expressions>]
++ // <one-to-two-expressions> ::= <expression> [<expression>]
++ int arity = -1;
++ if (ParseOperatorName(state, &arity) &&
++ arity > 0 && // 0 arity => disabled.
++ (arity < 3 || ParseExpression(state)) &&
++ (arity < 2 || ParseExpression(state)) &&
++ (arity < 1 || ParseExpression(state))) {
++ return true;
++ }
++ }
++ state->parse_state = copy;
++
++ // sizeof type
+ if (ParseTwoCharToken(state, "st") && ParseType(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseTwoCharToken(state, "sr") && ParseType(state) &&
+- ParseUnqualifiedName(state) &&
+- ParseTemplateArgs(state)) {
++ // Object and pointer member access expressions.
++ if ((ParseTwoCharToken(state, "dt") || ParseTwoCharToken(state, "pt")) &&
++ ParseExpression(state) && ParseType(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseTwoCharToken(state, "sr") && ParseType(state) &&
+- ParseUnqualifiedName(state)) {
++ // Pointer-to-member access expressions. This parses the same as a binary
++ // operator, but it's implemented separately because "ds" shouldn't be
++ // accepted in other contexts that parse an operator name.
++ if (ParseTwoCharToken(state, "ds") && ParseExpression(state) &&
++ ParseExpression(state)) {
+ return true;
+ }
+- *state = copy;
+- return false;
++ state->parse_state = copy;
++
++ // Parameter pack expansion
++ if (ParseTwoCharToken(state, "sp") && ParseExpression(state)) {
++ return true;
++ }
++ state->parse_state = copy;
++
++ return ParseUnresolvedName(state);
+ }
+
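The arity-driven chain above runs exactly k operand parses for a k-ary operator, because each "(arity < N || ParseExpression(state))" term short-circuits once N exceeds the arity. A standalone illustration of that counting (ParseOperand and ParseOperatorExpr are hypothetical stand-ins):

#include <cstdio>

// Counts how many operand parses the short-circuit chain actually performs.
static int g_calls = 0;

static bool ParseOperand() {
  ++g_calls;
  return true;
}

static bool ParseOperatorExpr(int arity) {
  return arity > 0 &&
         (arity < 3 || ParseOperand()) &&
         (arity < 2 || ParseOperand()) &&
         (arity < 1 || ParseOperand());
}

int main() {
  for (int arity = 1; arity <= 3; ++arity) {
    g_calls = 0;
    ParseOperatorExpr(arity);
    printf("arity %d -> %d operand parses\n", arity, g_calls);
  }
  return 0;
}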
+ // <expr-primary> ::= L <type> <(value) number> E
+@@ -1202,116 +1794,194 @@ static bool ParseExpression(State *state) {
+ // ::= L <mangled-name> E
+ // // A bug in g++'s C++ ABI version 2 (-fabi-version=2).
+ // ::= LZ <encoding> E
++//
++// Warning, subtle: the "bug" LZ production above is ambiguous with the first
++// production where <type> starts with <local-name>, which can lead to
++// exponential backtracking in two scenarios:
++//
++// - When whatever follows the E in the <local-name> in the first production is
++// not a name, we backtrack the whole <encoding> and re-parse the whole thing.
++//
++// - When whatever follows the <local-name> in the first production is not a
++// number and this <expr-primary> may be followed by a name, we backtrack the
++// <name> and re-parse it.
++//
++// Moreover this ambiguity isn't always resolved -- for example, the following
++// has two different parses:
++//
++// _ZaaILZ4aoeuE1x1EvE
++// => operator&&<aoeu, x, E, void>
++// => operator&&<(aoeu::x)(1), void>
++//
++// To resolve this, we just do what GCC's demangler does, and refuse to parse
++// casts to <local-name> types.
+ static bool ParseExprPrimary(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'L') && ParseType(state) &&
+- ParseNumber(state, NULL) &&
+- ParseOneCharToken(state, 'E')) {
+- return true;
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++
++ // The "LZ" special case: if we see LZ, we commit to accept "LZ <encoding> E"
++ // or fail, no backtracking.
++ if (ParseTwoCharToken(state, "LZ")) {
++ if (ParseEncoding(state) && ParseOneCharToken(state, 'E')) {
++ return true;
++ }
++
++ state->parse_state = copy;
++ return false;
+ }
+- *state = copy;
+
++ // The merged cast production.
+ if (ParseOneCharToken(state, 'L') && ParseType(state) &&
+- ParseFloatNumber(state) &&
+- ParseOneCharToken(state, 'E')) {
++ ParseExprCastValue(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'L') && ParseMangledName(state) &&
+ ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+- if (ParseTwoCharToken(state, "LZ") && ParseEncoding(state) &&
+- ParseOneCharToken(state, 'E')) {
++ return false;
++}
++
++// <number> or <float>, followed by 'E', as described above ParseExprPrimary.
++static bool ParseExprCastValue(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ // We have to be able to backtrack after accepting a number because we could
++ // have e.g. "7fffE", which will accept "7" as a number but then fail to find
++ // the 'E'.
++ ParseState copy = state->parse_state;
++ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, 'E')) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
++
++ if (ParseFloatNumber(state) && ParseOneCharToken(state, 'E')) {
++ return true;
++ }
++ state->parse_state = copy;
+
+ return false;
+ }
+
+-// <local-name> := Z <(function) encoding> E <(entity) name>
+-// [<discriminator>]
+-// := Z <(function) encoding> E s [<discriminator>]
+-static bool ParseLocalName(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
+- ParseOneCharToken(state, 'E') && MaybeAppend(state, "::") &&
+- ParseName(state) && Optional(ParseDiscriminator(state))) {
++// <local-name> ::= Z <(function) encoding> E <(entity) name> [<discriminator>]
++// ::= Z <(function) encoding> E s [<discriminator>]
++//
++// Parsing a common prefix of these two productions together avoids an
++// exponential blowup of backtracking. Parse like:
++// <local-name> := Z <encoding> E <local-name-suffix>
++// <local-name-suffix> ::= s [<discriminator>]
++// ::= <name> [<discriminator>]
++
++static bool ParseLocalNameSuffix(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++
++ if (MaybeAppend(state, "::") && ParseName(state) &&
++ Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+- *state = copy;
+
++ // Since we're not going to overwrite the above "::" by re-parsing the
++ // <encoding> (whose trailing '\0' byte was in the byte now holding the
++ // first ':'), we have to roll back the "::" if the <name> parse failed.
++ if (state->parse_state.append) {
++ state->out[state->parse_state.out_cur_idx - 2] = '\0';
++ }
++
++ return ParseOneCharToken(state, 's') && Optional(ParseDiscriminator(state));
++}
++
++static bool ParseLocalName(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
+- ParseTwoCharToken(state, "Es") && Optional(ParseDiscriminator(state))) {
++ ParseOneCharToken(state, 'E') && ParseLocalNameSuffix(state)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <discriminator> := _ <(non-negative) number>
+ static bool ParseDiscriminator(State *state) {
+- State copy = *state;
+- if (ParseOneCharToken(state, '_') && ParseNumber(state, NULL)) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
++ ParseState copy = state->parse_state;
++ if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
+ // <substitution> ::= S_
+ // ::= S <seq-id> _
+ // ::= St, etc.
+-static bool ParseSubstitution(State *state) {
++//
++// "St" is special in that it's not valid as a standalone name, and it *is*
++// allowed to precede a name without being wrapped in "N...E". This means that
++// if we accept it on its own, we can accept "St1a" and try to parse
++// template-args, then fail and backtrack, accept "St" on its own, then "1a" as
++// an unqualified name and re-parse the same template-args. To block this
++// exponential backtracking, we disable it with 'accept_std=false' in
++// problematic contexts.
++static bool ParseSubstitution(State *state, bool accept_std) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ if (ParseTwoCharToken(state, "S_")) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+ }
+
+- State copy = *state;
++ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'S') && ParseSeqId(state) &&
+ ParseOneCharToken(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+ }
+- *state = copy;
++ state->parse_state = copy;
+
+ // Expand abbreviations like "St" => "std".
+ if (ParseOneCharToken(state, 'S')) {
+ const AbbrevPair *p;
+- for (p = kSubstitutionList; p->abbrev != NULL; ++p) {
+- if (state->mangled_cur[0] == p->abbrev[1]) {
++ for (p = kSubstitutionList; p->abbrev != nullptr; ++p) {
++ if (RemainingInput(state)[0] == p->abbrev[1] &&
++ (accept_std || p->abbrev[1] != 't')) {
+ MaybeAppend(state, "std");
+ if (p->real_name[0] != '\0') {
+ MaybeAppend(state, "::");
+ MaybeAppend(state, p->real_name);
+ }
+- ++state->mangled_cur;
++ ++state->parse_state.mangled_idx;
+ return true;
+ }
+ }
+ }
+- *state = copy;
++ state->parse_state = copy;
+ return false;
+ }
+
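The accept_std flag added above blocks the exponential re-parse described in the comment: in contexts where a bare "St" would be retried as "std" plus an unqualified name, the 't' abbreviation is simply skipped while every other substitution still expands. A minimal sketch of that gate (Abbrev, kSubs, and ExpandSubstitution are illustrative stand-ins, not the real kSubstitutionList):

#include <cstdio>

// Skips the "St" row when accept_std is false; all other rows still match.
struct Abbrev { char second_char; const char* expansion; };

static const Abbrev kSubs[] = {
    {'t', "std"},
    {'a', "std::allocator"},
    {'\0', nullptr},
};

static const char* ExpandSubstitution(char second_char, bool accept_std) {
  for (const Abbrev* p = kSubs; p->second_char != '\0'; ++p) {
    if (p->second_char == second_char && (accept_std || p->second_char != 't')) {
      return p->expansion;
    }
  }
  return nullptr;
}

int main() {
  const char* sa = ExpandSubstitution('a', /*accept_std=*/false);
  const char* st = ExpandSubstitution('t', /*accept_std=*/false);
  printf("%s\n", sa);                                  // std::allocator
  printf("%s\n", st == nullptr ? "St rejected" : st);  // St rejected
  return 0;
}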
+ // Parse <mangled-name>, optionally followed by either a function-clone suffix
+ // or version suffix. Returns true only if all of "mangled_cur" was consumed.
+ static bool ParseTopLevelMangledName(State *state) {
++ ComplexityGuard guard(state);
++ if (guard.IsTooComplex()) return false;
+ if (ParseMangledName(state)) {
+- if (state->mangled_cur[0] != '\0') {
++ if (RemainingInput(state)[0] != '\0') {
+ // Drop trailing function clone suffix, if any.
+- if (IsFunctionCloneSuffix(state->mangled_cur)) {
++ if (IsFunctionCloneSuffix(RemainingInput(state))) {
+ return true;
+ }
+ // Append trailing version suffix if any.
+ // ex. _Z3foo@@GLIBCXX_3.4
+- if (state->mangled_cur[0] == '@') {
+- MaybeAppend(state, state->mangled_cur);
++ if (RemainingInput(state)[0] == '@') {
++ MaybeAppend(state, RemainingInput(state));
+ return true;
+ }
+ return false; // Unconsumed suffix.
+@@ -1320,6 +1990,10 @@ static bool ParseTopLevelMangledName(State *state) {
+ }
+ return false;
+ }
++
++static bool Overflowed(const State *state) {
++ return state->parse_state.out_cur_idx >= state->out_end_idx;
++}
+ #endif
+
+ // The demangler entry point.
+@@ -1356,7 +2030,8 @@ bool Demangle(const char *mangled, char *out, size_t out_size) {
+ #else
+ State state;
+ InitState(&state, mangled, out, out_size);
+- return ParseTopLevelMangledName(&state) && !state.overflowed;
++ return ParseTopLevelMangledName(&state) && !Overflowed(&state) &&
++ state.parse_state.out_cur_idx > 0;
+ #endif
+ }
+
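With the overflow check rewritten in terms of out_cur_idx, the entry point now succeeds only when the whole input was consumed, the result fit in the caller-provided buffer, and something was actually written. A hypothetical caller, assuming the google namespace and the include path used by this copy of symbolize (adjust both to match config.h and the local tree):

#include <cstdio>

#include "base/third_party/symbolize/demangle.h"  // assumed include path

// Demangle() writes the demangled name into the caller's buffer and returns
// false on any parse failure, truncation, or empty output.
int main() {
  char out[256];
  const char* mangled = "_ZN3foo3barEv";
  if (google::Demangle(mangled, out, sizeof(out))) {  // namespace is an assumption
    printf("%s -> %s\n", mangled, out);  // expected: foo::bar()
  } else {
    printf("could not demangle %s\n", mangled);
  }
  return 0;
}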
+diff --git a/base/third_party/symbolize/demangle.h b/base/third_party/symbolize/demangle.h
+index 416f7ee153560..26e821a53c2cb 100644
+--- a/base/third_party/symbolize/demangle.h
++++ b/base/third_party/symbolize/demangle.h
+@@ -70,6 +70,8 @@
+ #ifndef BASE_DEMANGLE_H_
+ #define BASE_DEMANGLE_H_
+
++#include <stddef.h>
++
+ #include "config.h"
+ #include "glog/logging.h"
+
diff --git a/base/third_party/symbolize/patches/010-clang-format.patch b/base/third_party/symbolize/patches/010-clang-format.patch
new file mode 100644
index 0000000..3e786f0
--- /dev/null
+++ b/base/third_party/symbolize/patches/010-clang-format.patch
@@ -0,0 +1,1184 @@
+diff --git a/base/third_party/symbolize/demangle.cc b/base/third_party/symbolize/demangle.cc
+index 2632646dd4072..8db75f01071e2 100644
+--- a/base/third_party/symbolize/demangle.cc
++++ b/base/third_party/symbolize/demangle.cc
+@@ -139,8 +139,8 @@ static const AbbrevPair kBuiltinTypeList[] = {
+ {"g", "__float128", 0},
+ {"z", "ellipsis", 0},
+
+- {"De", "decimal128", 0}, // IEEE 754r decimal floating point (128 bits)
+- {"Dd", "decimal64", 0}, // IEEE 754r decimal floating point (64 bits)
++ {"De", "decimal128", 0}, // IEEE 754r decimal floating point (128 bits)
++ {"Dd", "decimal64", 0}, // IEEE 754r decimal floating point (64 bits)
+ {"Dc", "decltype(auto)", 0},
+ {"Da", "auto", 0},
+ {"Dn", "std::nullptr_t", 0}, // i.e., decltype(nullptr)
+@@ -148,7 +148,7 @@ static const AbbrevPair kBuiltinTypeList[] = {
+ {"Di", "char32_t", 0},
+ {"Du", "char8_t", 0},
+ {"Ds", "char16_t", 0},
+- {"Dh", "float16", 0}, // IEEE 754r half-precision float (16 bits)
++ {"Dh", "float16", 0}, // IEEE 754r half-precision float (16 bits)
+ {nullptr, nullptr, 0},
+ };
+
+@@ -193,8 +193,8 @@ static_assert(sizeof(ParseState) == 4 * sizeof(int),
+ // Only one copy of this exists for each call to Demangle, so the size of this
+ // struct is nearly inconsequential.
+ typedef struct {
+- const char *mangled_begin; // Beginning of input string.
+- char *out; // Beginning of output string.
++ const char* mangled_begin; // Beginning of input string.
++ char* out; // Beginning of output string.
+ int out_end_idx; // One past last allowed output character.
+ int recursion_depth; // For stack exhaustion prevention.
+ int steps; // Cap how much work we'll do, regardless of depth.
+@@ -206,7 +206,7 @@ namespace {
+ // Also prevent unbounded handling of complex inputs.
+ class ComplexityGuard {
+ public:
+- explicit ComplexityGuard(State *state) : state_(state) {
++ explicit ComplexityGuard(State* state) : state_(state) {
+ ++state->recursion_depth;
+ ++state->steps;
+ }
+@@ -239,7 +239,7 @@ class ComplexityGuard {
+ }
+
+ private:
+- State *state_;
++ State* state_;
+ };
+ } // namespace
+
+@@ -255,7 +255,7 @@ static size_t StrLen(const char *str) {
+ }
+
+ // Returns true if "str" has at least "n" characters remaining.
+-static bool AtLeastNumCharsRemaining(const char *str, size_t n) {
++static bool AtLeastNumCharsRemaining(const char* str, size_t n) {
+ for (size_t i = 0; i < n; ++i) {
+ if (str[i] == '\0') {
+ return false;
+@@ -291,7 +291,7 @@ static void InitState(State* state,
+ state->parse_state.append = true;
+ }
+
+-static inline const char *RemainingInput(State *state) {
++static inline const char* RemainingInput(State* state) {
+ return &state->mangled_begin[state->parse_state.mangled_idx];
+ }
+
+@@ -300,7 +300,9 @@ static inline const char *RemainingInput(State *state) {
+ // not contain '\0'.
+ static bool ParseOneCharToken(State *state, const char one_char_token) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (RemainingInput(state)[0] == one_char_token) {
+ ++state->parse_state.mangled_idx;
+ return true;
+@@ -313,7 +315,9 @@ static bool ParseOneCharToken(State *state, const char one_char_token) {
+ // not contain '\0'.
+ static bool ParseTwoCharToken(State *state, const char *two_char_token) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (RemainingInput(state)[0] == two_char_token[0] &&
+ RemainingInput(state)[1] == two_char_token[1]) {
+ state->parse_state.mangled_idx += 2;
+@@ -326,7 +330,9 @@ static bool ParseTwoCharToken(State *state, const char *two_char_token) {
+ // "char_class" at "mangled_cur" position.
+ static bool ParseCharClass(State *state, const char *char_class) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (RemainingInput(state)[0] == '\0') {
+ return false;
+ }
+@@ -340,7 +346,7 @@ static bool ParseCharClass(State *state, const char *char_class) {
+ return false;
+ }
+
+-static bool ParseDigit(State *state, int *digit) {
++static bool ParseDigit(State* state, int* digit) {
+ char c = RemainingInput(state)[0];
+ if (ParseCharClass(state, "0123456789")) {
+ if (digit != nullptr) {
+@@ -352,7 +358,9 @@ static bool ParseDigit(State *state, int *digit) {
+ }
+
+ // This function is used for handling an optional non-terminal.
+-static bool Optional(bool /*status*/) { return true; }
++static bool Optional(bool /*status*/) {
++ return true;
++}
+
+ // This function is used for handling <non-terminal>+ syntax.
+ typedef bool (*ParseFunc)(State *);
+@@ -378,7 +386,7 @@ static bool ZeroOrMore(ParseFunc parse_func, State *state) {
+ // Append "str" at "out_cur_idx". If there is an overflow, out_cur_idx is
+ // set to out_end_idx+1. The output string is ensured to
+ // always terminate with '\0' as long as there is no overflow.
+-static void Append(State *state, const char *const str, const size_t length) {
++static void Append(State* state, const char* const str, const size_t length) {
+ for (size_t i = 0; i < length; ++i) {
+ if (state->parse_state.out_cur_idx + 1 <
+ state->out_end_idx) { // +1 for '\0'
+@@ -396,13 +404,17 @@ static void Append(State *state, const char *const str, const size_t length) {
+ }
+
+ // We don't use equivalents in libc to avoid locale issues.
+-static bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
++static bool IsLower(char c) {
++ return c >= 'a' && c <= 'z';
++}
+
+ static bool IsAlpha(char c) {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+ }
+
+-static bool IsDigit(char c) { return c >= '0' && c <= '9'; }
++static bool IsDigit(char c) {
++ return c >= '0' && c <= '9';
++}
+
+ // Returns true if "str" is a function clone suffix. These suffixes are used
+ // by GCC 4.5.x and later versions (and our locally-modified version of GCC
+@@ -428,20 +440,22 @@ static bool IsFunctionCloneSuffix(const char *str) {
+ ++i;
+ }
+ }
+- if (!parsed)
++ if (!parsed) {
+ return false;
++ }
+ }
+ return true; // Consumed everything in "str".
+ }
+
+-static bool EndsWith(State *state, const char chr) {
++static bool EndsWith(State* state, const char chr) {
+ return state->parse_state.out_cur_idx > 0 &&
+ state->parse_state.out_cur_idx < state->out_end_idx &&
+ chr == state->out[state->parse_state.out_cur_idx - 1];
+ }
+
+ // Append "str" with some tweaks, iff "append" state is true.
+-static void MaybeAppendWithLength(State *state, const char *const str,
++static void MaybeAppendWithLength(State* state,
++ const char* const str,
+ const size_t length) {
+ if (state->parse_state.append && length > 0) {
+ // Append a space if the output buffer ends with '<' and "str"
+@@ -461,7 +475,7 @@ static void MaybeAppendWithLength(State *state, const char *const str,
+ }
+
+ // Appends a positive decimal number to the output if appending is enabled.
+-static bool MaybeAppendDecimal(State *state, int val) {
++static bool MaybeAppendDecimal(State* state, int val) {
+ // Max {32-64}-bit unsigned int is 20 digits.
+ constexpr size_t kMaxLength = 20;
+ char buf[kMaxLength];
+@@ -471,7 +485,7 @@ static bool MaybeAppendDecimal(State *state, int val) {
+ if (state->parse_state.append) {
+ // We can't have a one-before-the-beginning pointer, so instead start with
+ // one-past-the-end and manipulate one character before the pointer.
+- char *p = &buf[kMaxLength];
++ char* p = &buf[kMaxLength];
+ do { // val=0 is the only input that should write a leading zero digit.
+ *--p = static_cast<char>((val % 10) + '0');
+ val /= 10;
+@@ -486,7 +500,7 @@ static bool MaybeAppendDecimal(State *state, int val) {
+
+ // A convenient wrapper around MaybeAppendWithLength().
+ // Returns true so that it can be placed in "if" conditions.
+-static bool MaybeAppend(State *state, const char *const str) {
++static bool MaybeAppend(State* state, const char* const str) {
+ if (state->parse_state.append) {
+ size_t length = StrLen(str);
+ MaybeAppendWithLength(state, str, length);
+@@ -501,7 +515,7 @@ static bool EnterNestedName(State *state) {
+ }
+
+ // This function is used for handling nested names.
+-static bool LeaveNestedName(State *state, int16_t prev_value) {
++static bool LeaveNestedName(State* state, int16_t prev_value) {
+ state->parse_state.nest_level = prev_value;
+ return true;
+ }
+@@ -543,7 +557,7 @@ static void MaybeCancelLastSeparator(State *state) {
+
+ // Returns true if the identifier of the given length pointed to by
+ // "mangled_cur" is anonymous namespace.
+-static bool IdentifierIsAnonymousNamespace(State *state, size_t length) {
++static bool IdentifierIsAnonymousNamespace(State* state, size_t length) {
+ // Returns true if "anon_prefix" is a proper prefix of "mangled_cur".
+ static const char anon_prefix[] = "_GLOBAL__N_";
+ return (length > (sizeof(anon_prefix) - 1) &&
+@@ -554,25 +568,25 @@ static bool IdentifierIsAnonymousNamespace(State *state, size_t length) {
+ static bool ParseMangledName(State *state);
+ static bool ParseEncoding(State *state);
+ static bool ParseName(State *state);
+-static bool ParseUnscopedName(State *state);
++static bool ParseUnscopedName(State* state);
+ static bool ParseNestedName(State *state);
+ static bool ParsePrefix(State *state);
+ static bool ParseUnqualifiedName(State *state);
+ static bool ParseSourceName(State *state);
+ static bool ParseLocalSourceName(State *state);
+-static bool ParseUnnamedTypeName(State *state);
++static bool ParseUnnamedTypeName(State* state);
+ static bool ParseNumber(State *state, int *number_out);
+ static bool ParseFloatNumber(State *state);
+ static bool ParseSeqId(State *state);
+-static bool ParseIdentifier(State *state, size_t length);
+-static bool ParseOperatorName(State *state, int *arity);
++static bool ParseIdentifier(State* state, size_t length);
++static bool ParseOperatorName(State* state, int* arity);
+ static bool ParseSpecialName(State *state);
+ static bool ParseCallOffset(State *state);
+ static bool ParseNVOffset(State *state);
+ static bool ParseVOffset(State *state);
+-static bool ParseAbiTags(State *state);
++static bool ParseAbiTags(State* state);
+ static bool ParseCtorDtorName(State *state);
+-static bool ParseDecltype(State *state);
++static bool ParseDecltype(State* state);
+ static bool ParseType(State *state);
+ static bool ParseCVQualifiers(State *state);
+ static bool ParseBuiltinType(State *state);
+@@ -585,15 +599,15 @@ static bool ParseTemplateParam(State *state);
+ static bool ParseTemplateTemplateParam(State *state);
+ static bool ParseTemplateArgs(State *state);
+ static bool ParseTemplateArg(State *state);
+-static bool ParseBaseUnresolvedName(State *state);
+-static bool ParseUnresolvedName(State *state);
++static bool ParseBaseUnresolvedName(State* state);
++static bool ParseUnresolvedName(State* state);
+ static bool ParseExpression(State *state);
+ static bool ParseExprPrimary(State *state);
+-static bool ParseExprCastValue(State *state);
++static bool ParseExprCastValue(State* state);
+ static bool ParseLocalName(State *state);
+-static bool ParseLocalNameSuffix(State *state);
++static bool ParseLocalNameSuffix(State* state);
+ static bool ParseDiscriminator(State *state);
+-static bool ParseSubstitution(State *state, bool accept_std);
++static bool ParseSubstitution(State* state, bool accept_std);
+
+ // Implementation note: the following code is a straightforward
+ // translation of the Itanium C++ ABI defined in BNF with a couple of
+@@ -629,7 +643,9 @@ static bool ParseSubstitution(State *state, bool accept_std);
+ // <mangled-name> ::= _Z <encoding>
+ static bool ParseMangledName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
+ }
+
+@@ -638,7 +654,9 @@ static bool ParseMangledName(State *state) {
+ // ::= <special-name>
+ static bool ParseEncoding(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ // Implementing the first two productions together as <name>
+ // [<bare-function-type>] avoids exponential blowup of backtracking.
+ //
+@@ -660,7 +678,9 @@ static bool ParseEncoding(State *state) {
+ // ::= <local-name>
+ static bool ParseName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (ParseNestedName(state) || ParseLocalName(state)) {
+ return true;
+ }
+@@ -690,7 +710,9 @@ static bool ParseName(State *state) {
+ // ::= St <unqualified-name>
+ static bool ParseUnscopedName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (ParseUnqualifiedName(state)) {
+ return true;
+ }
+@@ -706,7 +728,7 @@ static bool ParseUnscopedName(State *state) {
+
+ // <ref-qualifer> ::= R // lvalue method reference qualifier
+ // ::= O // rvalue method reference qualifier
+-static inline bool ParseRefQualifier(State *state) {
++static inline bool ParseRefQualifier(State* state) {
+ return ParseCharClass(state, "OR");
+ }
+
+@@ -716,7 +738,9 @@ static inline bool ParseRefQualifier(State *state) {
+ // <template-args> E
+ static bool ParseNestedName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
+ Optional(ParseCVQualifiers(state)) &&
+@@ -742,7 +766,9 @@ static bool ParseNestedName(State *state) {
+ // ::= <substitution>
+ static bool ParsePrefix(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ bool has_something = false;
+ while (true) {
+ MaybeAppendSeparator(state);
+@@ -773,7 +799,9 @@ static bool ParsePrefix(State *state) {
+ // <local-source-name> is a GCC extension; see below.
+ static bool ParseUnqualifiedName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (ParseOperatorName(state, nullptr) || ParseCtorDtorName(state) ||
+ ParseSourceName(state) || ParseLocalSourceName(state) ||
+ ParseUnnamedTypeName(state)) {
+@@ -784,9 +812,11 @@ static bool ParseUnqualifiedName(State *state) {
+
+ // <abi-tags> ::= <abi-tag> [<abi-tags>]
+ // <abi-tag> ::= B <source-name>
+-static bool ParseAbiTags(State *state) {
++static bool ParseAbiTags(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+
+ while (ParseOneCharToken(state, 'B')) {
+ ParseState copy = state->parse_state;
+@@ -805,7 +835,9 @@ static bool ParseAbiTags(State *state) {
+ // <source-name> ::= <positive length number> <identifier>
+ static bool ParseSourceName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ int length = -1;
+ if (ParseNumber(state, &length) &&
+@@ -823,7 +855,9 @@ static bool ParseSourceName(State *state) {
+ // https://gcc.gnu.org/viewcvs?view=rev&revision=124467
+ static bool ParseLocalSourceName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
+ Optional(ParseDiscriminator(state))) {
+@@ -837,9 +871,11 @@ static bool ParseLocalSourceName(State *state) {
+ // ::= <closure-type-name>
+ // <closure-type-name> ::= Ul <lambda-sig> E [<(nonnegative) number>] _
+ // <lambda-sig> ::= <(parameter) type>+
+-static bool ParseUnnamedTypeName(State *state) {
++static bool ParseUnnamedTypeName(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ // Type's 1-based index n is encoded as { "", n == 1; itoa(n-2), otherwise }.
+ // Optionally parse the encoded value into 'which' and add 2 to get the index.
+@@ -878,12 +914,14 @@ static bool ParseUnnamedTypeName(State *state) {
+ // parsed number on success.
+ static bool ParseNumber(State *state, int *number_out) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ bool negative = false;
+ if (ParseOneCharToken(state, 'n')) {
+ negative = true;
+ }
+- const char *p = RemainingInput(state);
++ const char* p = RemainingInput(state);
+ uint64_t number = 0;
+ for (; *p != '\0'; ++p) {
+ if (IsDigit(*p)) {
+@@ -913,8 +951,10 @@ static bool ParseNumber(State *state, int *number_out) {
+ // hexadecimal string.
+ static bool ParseFloatNumber(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
+- const char *p = RemainingInput(state);
++ if (guard.IsTooComplex()) {
++ return false;
++ }
++ const char* p = RemainingInput(state);
+ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
+ break;
+@@ -931,8 +971,10 @@ static bool ParseFloatNumber(State *state) {
+ // using digits and upper case letters
+ static bool ParseSeqId(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
+- const char *p = RemainingInput(state);
++ if (guard.IsTooComplex()) {
++ return false;
++ }
++ const char* p = RemainingInput(state);
+ for (; *p != '\0'; ++p) {
+ if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
+ break;
+@@ -946,9 +988,11 @@ static bool ParseSeqId(State *state) {
+ }
+
+ // <identifier> ::= <unqualified source code identifier> (of given length)
+-static bool ParseIdentifier(State *state, size_t length) {
++static bool ParseIdentifier(State* state, size_t length) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (!AtLeastNumCharsRemaining(RemainingInput(state), length)) {
+ return false;
+ }
+@@ -964,9 +1008,11 @@ static bool ParseIdentifier(State *state, size_t length) {
+ // <operator-name> ::= nw, and other two letters cases
+ // ::= cv <type> # (cast)
+ // ::= v <digit> <source-name> # vendor extended operator
+-static bool ParseOperatorName(State *state, int *arity) {
++static bool ParseOperatorName(State* state, int* arity) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (!AtLeastNumCharsRemaining(RemainingInput(state), 2)) {
+ return false;
+ }
+@@ -1036,7 +1082,9 @@ static bool ParseOperatorName(State *state, int *arity) {
+ // stack traces. The are special data.
+ static bool ParseSpecialName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTISH") &&
+ ParseType(state)) {
+@@ -1098,7 +1146,9 @@ static bool ParseSpecialName(State *state) {
+ // ::= v <v-offset> _
+ static bool ParseCallOffset(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
+ ParseOneCharToken(state, '_')) {
+@@ -1118,14 +1168,18 @@ static bool ParseCallOffset(State *state) {
+ // <nv-offset> ::= <(offset) number>
+ static bool ParseNVOffset(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ return ParseNumber(state, nullptr);
+ }
+
+ // <v-offset> ::= <(offset) number> _ <(virtual offset) number>
+ static bool ParseVOffset(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
+ ParseNumber(state, nullptr)) {
+@@ -1144,11 +1198,13 @@ static bool ParseVOffset(State *state) {
+ // ::= C4 | D4
+ static bool ParseCtorDtorName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'C')) {
+ if (ParseCharClass(state, "1234")) {
+- const char *const prev_name =
++ const char* const prev_name =
+ state->out + state->parse_state.prev_name_idx;
+ MaybeAppendWithLength(state, prev_name,
+ state->parse_state.prev_name_length);
+@@ -1161,7 +1217,7 @@ static bool ParseCtorDtorName(State *state) {
+ state->parse_state = copy;
+
+ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "0124")) {
+- const char *const prev_name = state->out + state->parse_state.prev_name_idx;
++ const char* const prev_name = state->out + state->parse_state.prev_name_idx;
+ MaybeAppend(state, "~");
+ MaybeAppendWithLength(state, prev_name,
+ state->parse_state.prev_name_length);
+@@ -1174,9 +1230,11 @@ static bool ParseCtorDtorName(State *state) {
+ // <decltype> ::= Dt <expression> E # decltype of an id-expression or class
+ // # member access (C++0x)
+ // ::= DT <expression> E # decltype of an expression (C++0x)
+-static bool ParseDecltype(State *state) {
++static bool ParseDecltype(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
+@@ -1209,7 +1267,9 @@ static bool ParseDecltype(State *state) {
+ //
+ static bool ParseType(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+
+ // We should check CV-qualifers, and PRGC things first.
+@@ -1227,7 +1287,9 @@ static bool ParseType(State *state) {
+ // By consuming the CV-qualifiers first, the former parse is disabled.
+ if (ParseCVQualifiers(state)) {
+ const bool result = ParseType(state);
+- if (!result) state->parse_state = copy;
++ if (!result) {
++ state->parse_state = copy;
++ }
+ return result;
+ }
+ state->parse_state = copy;
+@@ -1238,7 +1300,9 @@ static bool ParseType(State *state) {
+ // refusing to backtrack the tag characters.
+ if (ParseCharClass(state, "OPRCG")) {
+ const bool result = ParseType(state);
+- if (!result) state->parse_state = copy;
++ if (!result) {
++ state->parse_state = copy;
++ }
+ return result;
+ }
+ state->parse_state = copy;
+@@ -1286,7 +1350,9 @@ static bool ParseType(State *state) {
+ // ParseType().
+ static bool ParseCVQualifiers(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ int num_cv_qualifiers = 0;
+ num_cv_qualifiers += ParseOneCharToken(state, 'r');
+ num_cv_qualifiers += ParseOneCharToken(state, 'V');
+@@ -1303,7 +1369,9 @@ static bool ParseCVQualifiers(State *state) {
+ //
+ static bool ParseBuiltinType(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ const AbbrevPair *p;
+ for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
+ // Guaranteed only 1- or 2-character strings in kBuiltinTypeList.
+@@ -1333,11 +1401,15 @@ static bool ParseBuiltinType(State *state) {
+ // noexcept
+ // ::= Dw <type>+ E # dynamic exception specification
+ // with instantiation-dependent types
+-static bool ParseExceptionSpec(State *state) {
++static bool ParseExceptionSpec(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+
+- if (ParseTwoCharToken(state, "Do")) return true;
++ if (ParseTwoCharToken(state, "Do")) {
++ return true;
++ }
+
+ ParseState copy = state->parse_state;
+ if (ParseTwoCharToken(state, "DO") && ParseExpression(state) &&
+@@ -1357,7 +1429,9 @@ static bool ParseExceptionSpec(State *state) {
+ // <function-type> ::= [exception-spec] F [Y] <bare-function-type> [O] E
+ static bool ParseFunctionType(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (Optional(ParseExceptionSpec(state)) && ParseOneCharToken(state, 'F') &&
+ Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
+@@ -1372,7 +1446,9 @@ static bool ParseFunctionType(State *state) {
+ // <bare-function-type> ::= <(signature) type>+
+ static bool ParseBareFunctionType(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ DisableAppend(state);
+ if (OneOrMore(ParseType, state)) {
+@@ -1387,7 +1463,9 @@ static bool ParseBareFunctionType(State *state) {
+ // <class-enum-type> ::= <name>
+ static bool ParseClassEnumType(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ return ParseName(state);
+ }
+
+@@ -1395,7 +1473,9 @@ static bool ParseClassEnumType(State *state) {
+ // ::= A [<(dimension) expression>] _ <(element) type>
+ static bool ParseArrayType(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'A') && ParseNumber(state, nullptr) &&
+ ParseOneCharToken(state, '_') && ParseType(state)) {
+@@ -1414,7 +1494,9 @@ static bool ParseArrayType(State *state) {
+ // <pointer-to-member-type> ::= M <(class) type> <(member) type>
+ static bool ParsePointerToMemberType(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'M') && ParseType(state) && ParseType(state)) {
+ return true;
+@@ -1427,7 +1509,9 @@ static bool ParsePointerToMemberType(State *state) {
+ // ::= T <parameter-2 non-negative number> _
+ static bool ParseTemplateParam(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (ParseTwoCharToken(state, "T_")) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+@@ -1447,7 +1531,9 @@ static bool ParseTemplateParam(State *state) {
+ // ::= <substitution>
+ static bool ParseTemplateTemplateParam(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ return (ParseTemplateParam(state) ||
+ // "std" on its own isn't a template.
+ ParseSubstitution(state, /*accept_std=*/false));
+@@ -1456,7 +1542,9 @@ static bool ParseTemplateTemplateParam(State *state) {
+ // <template-args> ::= I <template-arg>+ E
+ static bool ParseTemplateArgs(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ DisableAppend(state);
+ if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
+@@ -1475,7 +1563,9 @@ static bool ParseTemplateArgs(State *state) {
+ // ::= X <expression> E
+ static bool ParseTemplateArg(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'J') && ZeroOrMore(ParseTemplateArg, state) &&
+ ParseOneCharToken(state, 'E')) {
+@@ -1578,14 +1668,14 @@ static bool ParseTemplateArg(State *state) {
+ // <unresolved-type> ::= <template-param> [<template-args>]
+ // ::= <decltype>
+ // ::= <substitution>
+-static inline bool ParseUnresolvedType(State *state) {
++static inline bool ParseUnresolvedType(State* state) {
+ // No ComplexityGuard because we don't copy the state in this stack frame.
+ return (ParseTemplateParam(state) && Optional(ParseTemplateArgs(state))) ||
+ ParseDecltype(state) || ParseSubstitution(state, /*accept_std=*/false);
+ }
+
+ // <simple-id> ::= <source-name> [<template-args>]
+-static inline bool ParseSimpleId(State *state) {
++static inline bool ParseSimpleId(State* state) {
+ // No ComplexityGuard because we don't copy the state in this stack frame.
+
+ // Note: <simple-id> cannot be followed by a parameter pack; see comment in
+@@ -1596,9 +1686,11 @@ static inline bool ParseSimpleId(State *state) {
+ // <base-unresolved-name> ::= <source-name> [<template-args>]
+ // ::= on <operator-name> [<template-args>]
+ // ::= dn <destructor-name>
+-static bool ParseBaseUnresolvedName(State *state) {
++static bool ParseBaseUnresolvedName(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+
+ if (ParseSimpleId(state)) {
+ return true;
+@@ -1626,9 +1718,11 @@ static bool ParseBaseUnresolvedName(State *state) {
+ // <base-unresolved-name>
+ // ::= [gs] sr <unresolved-qualifier-level>+ E
+ // <base-unresolved-name>
+-static bool ParseUnresolvedName(State *state) {
++static bool ParseUnresolvedName(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+
+ ParseState copy = state->parse_state;
+ if (Optional(ParseTwoCharToken(state, "gs")) &&
+@@ -1684,7 +1778,9 @@ static bool ParseUnresolvedName(State *state) {
+ // ::= fL <number> p <(top-level) CV-qualifiers> <number> _
+ static bool ParseExpression(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
+ return true;
+ }
+@@ -1817,7 +1913,9 @@ static bool ParseExpression(State *state) {
+ // casts to <local-name> types.
+ static bool ParseExprPrimary(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+
+ // The "LZ" special case: if we see LZ, we commit to accept "LZ <encoding> E"
+@@ -1848,9 +1946,11 @@ static bool ParseExprPrimary(State *state) {
+ }
+
+ // <number> or <float>, followed by 'E', as described above ParseExprPrimary.
+-static bool ParseExprCastValue(State *state) {
++static bool ParseExprCastValue(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ // We have to be able to backtrack after accepting a number because we could
+ // have e.g. "7fffE", which will accept "7" as a number but then fail to find
+ // the 'E'.
+@@ -1877,9 +1977,11 @@ static bool ParseExprCastValue(State *state) {
+ // <local-name-suffix> ::= s [<discriminator>]
+ // ::= <name> [<discriminator>]
+
+-static bool ParseLocalNameSuffix(State *state) {
++static bool ParseLocalNameSuffix(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+
+ if (MaybeAppend(state, "::") && ParseName(state) &&
+ Optional(ParseDiscriminator(state))) {
+@@ -1896,9 +1998,11 @@ static bool ParseLocalNameSuffix(State *state) {
+ return ParseOneCharToken(state, 's') && Optional(ParseDiscriminator(state));
+ }
+
+-static bool ParseLocalName(State *state) {
++static bool ParseLocalName(State* state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
+ ParseOneCharToken(state, 'E') && ParseLocalNameSuffix(state)) {
+@@ -1911,7 +2015,9 @@ static bool ParseLocalName(State *state) {
+ // <discriminator> := _ <(non-negative) number>
+ static bool ParseDiscriminator(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ ParseState copy = state->parse_state;
+ if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
+ return true;
+@@ -1931,9 +2037,11 @@ static bool ParseDiscriminator(State *state) {
+ // an unqualified name and re-parse the same template-args. To block this
+ // exponential backtracking, we disable it with 'accept_std=false' in
+ // problematic contexts.
+-static bool ParseSubstitution(State *state, bool accept_std) {
++static bool ParseSubstitution(State* state, bool accept_std) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (ParseTwoCharToken(state, "S_")) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+@@ -1971,7 +2079,9 @@ static bool ParseSubstitution(State *state, bool accept_std) {
+ // or version suffix. Returns true only if all of "mangled_cur" was consumed.
+ static bool ParseTopLevelMangledName(State *state) {
+ ComplexityGuard guard(state);
+- if (guard.IsTooComplex()) return false;
++ if (guard.IsTooComplex()) {
++ return false;
++ }
+ if (ParseMangledName(state)) {
+ if (RemainingInput(state)[0] != '\0') {
+ // Drop trailing function clone suffix, if any.
+@@ -1991,13 +2101,13 @@ static bool ParseTopLevelMangledName(State *state) {
+ return false;
+ }
+
+-static bool Overflowed(const State *state) {
++static bool Overflowed(const State* state) {
+ return state->parse_state.out_cur_idx >= state->out_end_idx;
+ }
+ #endif
+
+ // The demangler entry point.
+-bool Demangle(const char *mangled, char *out, size_t out_size) {
++bool Demangle(const char* mangled, char* out, size_t out_size) {
+ #if defined(GLOG_OS_WINDOWS)
+ #if defined(HAVE_DBGHELP)
+ // When built with incremental linking, the Windows debugger
+diff --git a/base/third_party/symbolize/demangle.h b/base/third_party/symbolize/demangle.h
+index 26e821a53c2cb..7d5cfaaabf0dd 100644
+--- a/base/third_party/symbolize/demangle.h
++++ b/base/third_party/symbolize/demangle.h
+@@ -80,7 +80,7 @@ _START_GOOGLE_NAMESPACE_
+ // Demangle "mangled". On success, return true and write the
+ // demangled symbol name to "out". Otherwise, return false.
+ // "out" is modified even if demangling is unsuccessful.
+-bool GLOG_EXPORT Demangle(const char *mangled, char *out, size_t out_size);
++bool GLOG_EXPORT Demangle(const char* mangled, char* out, size_t out_size);
+
+ _END_GOOGLE_NAMESPACE_
+
+diff --git a/base/third_party/symbolize/glog/logging.h b/base/third_party/symbolize/glog/logging.h
+index 46869226024da..b935e3ec9cded 100644
+--- a/base/third_party/symbolize/glog/logging.h
++++ b/base/third_party/symbolize/glog/logging.h
+@@ -38,4 +38,4 @@
+
+ // Not needed in Chrome.
+
+-#endif // GLOG_LOGGING_H
++#endif // GLOG_LOGGING_H
+diff --git a/base/third_party/symbolize/symbolize.cc b/base/third_party/symbolize/symbolize.cc
+index b6ddc85d57185..a3b8399f411bf 100644
+--- a/base/third_party/symbolize/symbolize.cc
++++ b/base/third_party/symbolize/symbolize.cc
+@@ -97,7 +97,7 @@ void InstallSymbolizeOpenObjectFileCallback(
+ // where the input symbol is demangled in-place.
+ // To keep stack consumption low, we would like this function to not
+ // get inlined.
+-static ATTRIBUTE_NOINLINE void DemangleInplace(char *out, size_t out_size) {
++static ATTRIBUTE_NOINLINE void DemangleInplace(char* out, size_t out_size) {
+ char demangled[256]; // Big enough for sane demangled symbols.
+ if (Demangle(out, demangled, sizeof(demangled))) {
+ // Demangling succeeded. Copy to out if the space allows.
+@@ -121,17 +121,17 @@ _END_GOOGLE_NAMESPACE_
+ #else
+ #include <elf.h>
+ #endif
++#include <fcntl.h>
++#include <stdint.h>
++#include <sys/stat.h>
++#include <sys/types.h>
++#include <unistd.h>
+ #include <cerrno>
+ #include <climits>
+ #include <cstddef>
+ #include <cstdio>
+ #include <cstdlib>
+ #include <cstring>
+-#include <fcntl.h>
+-#include <stdint.h>
+-#include <sys/stat.h>
+-#include <sys/types.h>
+-#include <unistd.h>
+
+ #include "symbolize.h"
+ #include "config.h"
+@@ -153,7 +153,8 @@ ssize_t ReadFromOffset(const int fd,
+ const size_t count,
+ const size_t offset) {
+ SAFE_ASSERT(fd >= 0);
+- SAFE_ASSERT(count <= static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
++ SAFE_ASSERT(count <=
++ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+ char *buf0 = reinterpret_cast<char *>(buf);
+ size_t num_bytes = 0;
+ while (num_bytes < count) {
+@@ -176,8 +177,10 @@ ssize_t ReadFromOffset(const int fd,
+ // pointed by "fd" into the buffer starting at "buf" while handling
+ // short reads and EINTR. On success, return true. Otherwise, return
+ // false.
+-static bool ReadFromOffsetExact(const int fd, void *buf,
+- const size_t count, const size_t offset) {
++static bool ReadFromOffsetExact(const int fd,
++ void* buf,
++ const size_t count,
++ const size_t offset) {
+ ssize_t len = ReadFromOffset(fd, buf, count, offset);
+ return static_cast<size_t>(len) == count;
+ }
+@@ -199,9 +202,11 @@ static int FileGetElfType(const int fd) {
+ // and return true. Otherwise, return false.
+ // To keep stack consumption low, we would like this function to not get
+ // inlined.
+-static ATTRIBUTE_NOINLINE bool
+-GetSectionHeaderByType(const int fd, ElfW(Half) sh_num, const size_t sh_offset,
+- ElfW(Word) type, ElfW(Shdr) *out) {
++static ATTRIBUTE_NOINLINE bool GetSectionHeaderByType(const int fd,
++ ElfW(Half) sh_num,
++ const size_t sh_offset,
++ ElfW(Word) type,
++ ElfW(Shdr) * out) {
+ // Read at most 16 section headers at a time to save read calls.
+ ElfW(Shdr) buf[16];
+ for (size_t i = 0; i < sh_num;) {
+@@ -248,8 +253,8 @@ bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ }
+
+ for (size_t i = 0; i < elf_header.e_shnum; ++i) {
+- size_t section_header_offset = (elf_header.e_shoff +
+- elf_header.e_shentsize * i);
++ size_t section_header_offset =
++ (elf_header.e_shoff + elf_header.e_shentsize * i);
+ if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+ return false;
+ }
+@@ -281,10 +286,13 @@ bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ // to out. Otherwise, return false.
+ // To keep stack consumption low, we would like this function to not get
+ // inlined.
+-static ATTRIBUTE_NOINLINE bool
+-FindSymbol(uint64_t pc, const int fd, char *out, size_t out_size,
+- uint64_t symbol_offset, const ElfW(Shdr) *strtab,
+- const ElfW(Shdr) *symtab) {
++static ATTRIBUTE_NOINLINE bool FindSymbol(uint64_t pc,
++ const int fd,
++ char* out,
++ size_t out_size,
++ uint64_t symbol_offset,
++ const ElfW(Shdr) * strtab,
++ const ElfW(Shdr) * symtab) {
+ if (symtab == NULL) {
+ return false;
+ }
+@@ -384,7 +392,7 @@ namespace {
+ // and snprintf().
+ class LineReader {
+ public:
+- explicit LineReader(int fd, char *buf, size_t buf_len, size_t offset)
++ explicit LineReader(int fd, char* buf, size_t buf_len, size_t offset)
+ : fd_(fd),
+ buf_(buf),
+ buf_len_(buf_len),
+@@ -449,11 +457,12 @@ class LineReader {
+ }
+
+ private:
+- LineReader(const LineReader &);
++ LineReader(const LineReader&);
+ void operator=(const LineReader&);
+
+ char *FindLineFeed() {
+- return reinterpret_cast<char *>(memchr(bol_, '\n', static_cast<size_t>(eod_ - bol_)));
++ return reinterpret_cast<char*>(
++ memchr(bol_, '\n', static_cast<size_t>(eod_ - bol_)));
+ }
+
+ bool BufferIsEmpty() {
+@@ -483,7 +492,8 @@ static char *GetHex(const char *start, const char *end, uint64_t *hex) {
+ int ch = *p;
+ if ((ch >= '0' && ch <= '9') ||
+ (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) {
+- *hex = (*hex << 4U) | (ch < 'A' ? static_cast<uint64_t>(ch - '0') : (ch & 0xF) + 9U);
++ *hex = (*hex << 4U) |
++ (ch < 'A' ? static_cast<uint64_t>(ch - '0') : (ch & 0xF) + 9U);
+ } else { // Encountered the first non-hex character.
+ break;
+ }
+@@ -647,12 +657,11 @@ static int OpenObjectFileContainingPcAndGetStartAddressNoHook(
+ }
+ }
+
+-int OpenObjectFileContainingPcAndGetStartAddress(
+- uint64_t pc,
+- uint64_t& start_address,
+- uint64_t& base_address,
+- char* out_file_name,
+- size_t out_file_name_size) {
++int OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
++ uint64_t& start_address,
++ uint64_t& base_address,
++ char* out_file_name,
++ size_t out_file_name_size) {
+ if (g_symbolize_open_object_file_callback) {
+ return g_symbolize_open_object_file_callback(
+ pc, start_address, base_address, out_file_name, out_file_name_size);
+@@ -668,7 +677,11 @@ int OpenObjectFileContainingPcAndGetStartAddress(
+ // bytes. Output will be truncated as needed, and a NUL character is always
+ // appended.
+ // NOTE: code from sandbox/linux/seccomp-bpf/demo.cc.
+-static char *itoa_r(uintptr_t i, char *buf, size_t sz, unsigned base, size_t padding) {
++static char* itoa_r(uintptr_t i,
++ char* buf,
++ size_t sz,
++ unsigned base,
++ size_t padding) {
+ // Make sure we can write at least one NUL byte.
+ size_t n = 1;
+ if (n > sz) {
+@@ -745,7 +758,8 @@ static void SafeAppendHexNumber(uint64_t value, char* dest, size_t dest_size) {
+ // and "out" is used as its output.
+ // To keep stack consumption low, we would like this function to not
+ // get inlined.
+-static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
++static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void* pc,
++ char* out,
+ size_t out_size) {
+ uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
+ uint64_t start_address = 0;
+@@ -822,14 +836,16 @@ static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
+
+ _END_GOOGLE_NAMESPACE_
+
+-#elif (defined(GLOG_OS_MACOSX) || defined(GLOG_OS_EMSCRIPTEN)) && defined(HAVE_DLADDR)
++#elif (defined(GLOG_OS_MACOSX) || defined(GLOG_OS_EMSCRIPTEN)) && \
++ defined(HAVE_DLADDR)
+
+ #include <dlfcn.h>
+ #include <cstring>
+
+ _START_GOOGLE_NAMESPACE_
+
+-static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
++static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void* pc,
++ char* out,
+ size_t out_size) {
+ Dl_info info;
+ if (dladdr(pc, &info)) {
+@@ -883,7 +899,8 @@ private:
+ SymInitializer& operator=(const SymInitializer&);
+ };
+
+-static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
++static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void* pc,
++ char* out,
+ size_t out_size) {
+ const static SymInitializer symInitializer;
+ if (!symInitializer.ready) {
+@@ -918,7 +935,7 @@ _END_GOOGLE_NAMESPACE_
+
+ _START_GOOGLE_NAMESPACE_
+
+-bool Symbolize(void *pc, char *out, size_t out_size) {
++bool Symbolize(void* pc, char* out, size_t out_size) {
+ return SymbolizeAndDemangle(pc, out, out_size);
+ }
+
+diff --git a/base/third_party/symbolize/symbolize.h b/base/third_party/symbolize/symbolize.h
+index 64ff0509ce57d..987569fdde67f 100644
+--- a/base/third_party/symbolize/symbolize.h
++++ b/base/third_party/symbolize/symbolize.h
+@@ -127,7 +127,7 @@ ATTRIBUTE_NOINLINE int OpenObjectFileContainingPcAndGetStartAddress(
+
+ _END_GOOGLE_NAMESPACE_
+
+-#endif /* __ELF__ */
++#endif /* __ELF__ */
+
+ _START_GOOGLE_NAMESPACE_
+
+@@ -140,7 +140,7 @@ struct FileDescriptor {
+ int get() { return fd_; }
+
+ private:
+- FileDescriptor(const FileDescriptor &);
++ FileDescriptor(const FileDescriptor&);
+ void operator=(const FileDescriptor&);
+ };
+
+diff --git a/base/third_party/symbolize/utilities.h b/base/third_party/symbolize/utilities.h
+index 8c61380fad159..bb206a8020315 100644
+--- a/base/third_party/symbolize/utilities.h
++++ b/base/third_party/symbolize/utilities.h
+@@ -35,13 +35,13 @@
+ #define UTILITIES_H__
+
+ #ifdef HAVE___ATTRIBUTE__
+-# define ATTRIBUTE_NOINLINE __attribute__ ((noinline))
+-# define HAVE_ATTRIBUTE_NOINLINE
++#define ATTRIBUTE_NOINLINE __attribute__((noinline))
++#define HAVE_ATTRIBUTE_NOINLINE
+ #elif defined(GLOG_OS_WINDOWS)
+-# define ATTRIBUTE_NOINLINE __declspec(noinline)
+-# define HAVE_ATTRIBUTE_NOINLINE
++#define ATTRIBUTE_NOINLINE __declspec(noinline)
++#define HAVE_ATTRIBUTE_NOINLINE
+ #else
+-# define ATTRIBUTE_NOINLINE
++#define ATTRIBUTE_NOINLINE
+ #endif
+
+ #endif // UTILITIES_H__
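Illustrative aside, not part of the imported patches: the clang-format patch above only reflows declarations such as Symbolize(void* pc, char* out, size_t out_size) without changing behavior. A minimal calling sketch follows; the "google" namespace, an unstripped binary, and the function-pointer cast are assumptions for illustration.

#include <cstdio>

#include "symbolize.h"  // declares Symbolize(void* pc, char* out, size_t out_size)

static void SampleFunction() {}

int main() {
  char name[256];
  // Casting a function pointer to void* is only conditionally supported in C++,
  // but it is the usual way to hand a program counter to Symbolize() on POSIX
  // toolchains; assumed to be acceptable here.
  void* pc = reinterpret_cast<void*>(&SampleFunction);
  if (google::Symbolize(pc, name, sizeof(name))) {
    std::printf("%p -> %s\n", pc, name);
  } else {
    std::printf("no symbol found for %p\n", pc);
  }
  return 0;
}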
diff --git a/base/third_party/symbolize/symbolize.cc b/base/third_party/symbolize/symbolize.cc
index c13be34..a2794e9 100644
--- a/base/third_party/symbolize/symbolize.cc
+++ b/base/third_party/symbolize/symbolize.cc
@@ -45,28 +45,35 @@
// some functions which are not guaranteed to be so, such as memchr()
// and memmove(). We assume they are async-signal-safe.
//
+// Additional header can be specified by the GLOG_BUILD_CONFIG_INCLUDE
+// macro to add platform specific defines (e.g. GLOG_OS_OPENBSD).
-// Note for Cobalt: Cobalt Starboard depends on the old version of Symbolize so
-// this file is from m27 Chromium. There are no Cobalt-introduced changes in
-// this file.
+#ifdef GLOG_BUILD_CONFIG_INCLUDE
+#include GLOG_BUILD_CONFIG_INCLUDE
+#endif // GLOG_BUILD_CONFIG_INCLUDE
-#include "build/build_config.h"
+#include "symbolize.h"
#include "utilities.h"
-#if SB_IS(EVERGREEN_COMPATIBLE)
-#include "starboard/common/log.h"
-#include "starboard/elf_loader/evergreen_info.h"
-#include "starboard/memory.h"
-#endif
-
#if defined(HAVE_SYMBOLIZE)
+#include <cstring>
+#include <cstdlib>
+
+#include <algorithm>
#include <limits>
#include "symbolize.h"
#include "demangle.h"
+#if defined(STARBOARD)
+#include "starboard/configuration.h"
+#include "starboard/common/log.h"
+#include "starboard/elf_loader/evergreen_info.h"
+#include "starboard/memory.h"
+#endif
+
_START_GOOGLE_NAMESPACE_
// We don't use assert() since it's not guaranteed to be
@@ -86,15 +93,22 @@
g_symbolize_callback = callback;
}
+static SymbolizeOpenObjectFileCallback g_symbolize_open_object_file_callback =
+ NULL;
+void InstallSymbolizeOpenObjectFileCallback(
+ SymbolizeOpenObjectFileCallback callback) {
+ g_symbolize_open_object_file_callback = callback;
+}
+
// This function wraps the Demangle function to provide an interface
// where the input symbol is demangled in-place.
// To keep stack consumption low, we would like this function to not
// get inlined.
-static ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size) {
+static ATTRIBUTE_NOINLINE void DemangleInplace(char* out, size_t out_size) {
char demangled[256]; // Big enough for sane demangled symbols.
if (Demangle(out, demangled, sizeof(demangled))) {
// Demangling succeeded. Copy to out if the space allows.
- int len = strlen(demangled);
+ size_t len = strlen(demangled);
if (len + 1 <= out_size) { // +1 for '\0'.
SAFE_ASSERT(len < sizeof(demangled));
memmove(out, demangled, len + 1);
@@ -106,23 +120,25 @@
#if defined(__ELF__)
+#if defined(HAVE_DLFCN_H)
#include <dlfcn.h>
-#if defined(OS_OPENBSD)
+#endif
+#if defined(GLOG_OS_OPENBSD)
#include <sys/exec_elf.h>
#else
#include <elf.h>
#endif
-#include <errno.h>
#include <fcntl.h>
-#include <limits.h>
-#include <stddef.h>
#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include <cerrno>
+#include <climits>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
#include "symbolize.h"
#include "config.h"
@@ -133,49 +149,47 @@
_START_GOOGLE_NAMESPACE_
-// Read up to "count" bytes from file descriptor "fd" into the buffer
-// starting at "buf" while handling short reads and EINTR. On
-// success, return the number of bytes read. Otherwise, return -1.
-static ssize_t ReadPersistent(const int fd, void *buf, const size_t count) {
+FileDescriptor::~FileDescriptor() {
+ if (fd_ >= 0) {
+ close(fd_);
+ }
+}
+
+ssize_t ReadFromOffset(const int fd,
+ void* buf,
+ const size_t count,
+ const size_t offset) {
SAFE_ASSERT(fd >= 0);
- SAFE_ASSERT(count <= std::numeric_limits<ssize_t>::max());
+ SAFE_ASSERT(count <=
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
char *buf0 = reinterpret_cast<char *>(buf);
- ssize_t num_bytes = 0;
+ size_t num_bytes = 0;
while (num_bytes < count) {
ssize_t len;
- NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
+ NO_INTR(len = pread(fd, buf0 + num_bytes, count - num_bytes,
+ static_cast<off_t>(offset + num_bytes)));
if (len < 0) { // There was an error other than EINTR.
return -1;
}
if (len == 0) { // Reached EOF.
break;
}
- num_bytes += len;
+ num_bytes += static_cast<size_t>(len);
}
SAFE_ASSERT(num_bytes <= count);
- return num_bytes;
-}
-
-// Read up to "count" bytes from "offset" in the file pointed by file
-// descriptor "fd" into the buffer starting at "buf". On success,
-// return the number of bytes read. Otherwise, return -1.
-static ssize_t ReadFromOffset(const int fd, void *buf,
- const size_t count, const off_t offset) {
- off_t off = lseek(fd, offset, SEEK_SET);
- if (off == (off_t)-1) {
- return -1;
- }
- return ReadPersistent(fd, buf, count);
+ return static_cast<ssize_t>(num_bytes);
}
// Try reading exactly "count" bytes from "offset" bytes in a file
// pointed by "fd" into the buffer starting at "buf" while handling
// short reads and EINTR. On success, return true. Otherwise, return
// false.
-static bool ReadFromOffsetExact(const int fd, void *buf,
- const size_t count, const off_t offset) {
+static bool ReadFromOffsetExact(const int fd,
+ void* buf,
+ const size_t count,
+ const size_t offset) {
ssize_t len = ReadFromOffset(fd, buf, count, offset);
- return len == count;
+ return static_cast<size_t>(len) == count;
}
// Returns elf_header.e_type if the file pointed by fd is an ELF binary.
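Illustrative aside, not part of the imported diff: the hunk above replaces the old lseek()+read() pair with a single pread()-based ReadFromOffset() that retries on EINTR and accumulates short reads. A standalone sketch of that pattern follows, under a hypothetical name so it is not confused with the patched function.

#include <cerrno>
#include <sys/types.h>
#include <unistd.h>

// Read up to "count" bytes at "offset" from "fd", retrying interrupted reads
// and accumulating short reads, mirroring the loop in the hunk above.
static ssize_t ReadAtOffsetSketch(int fd, void* buf, size_t count, size_t offset) {
  char* p = static_cast<char*>(buf);
  size_t done = 0;
  while (done < count) {
    ssize_t len;
    do {
      len = pread(fd, p + done, count - done,
                  static_cast<off_t>(offset + done));
    } while (len < 0 && errno == EINTR);  // retry reads interrupted by signals
    if (len < 0) {
      return -1;  // real I/O error
    }
    if (len == 0) {
      break;  // reached EOF
    }
    done += static_cast<size_t>(len);
  }
  return static_cast<ssize_t>(done);
}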
@@ -195,21 +209,26 @@
// and return true. Otherwise, return false.
// To keep stack consumption low, we would like this function to not get
// inlined.
-static ATTRIBUTE_NOINLINE bool
-GetSectionHeaderByType(const int fd, ElfW(Half) sh_num, const off_t sh_offset,
- ElfW(Word) type, ElfW(Shdr) *out) {
+static ATTRIBUTE_NOINLINE bool GetSectionHeaderByType(const int fd,
+ ElfW(Half) sh_num,
+ const size_t sh_offset,
+ ElfW(Word) type,
+ ElfW(Shdr) * out) {
// Read at most 16 section headers at a time to save read calls.
ElfW(Shdr) buf[16];
- for (int i = 0; i < sh_num;) {
- const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
- const ssize_t num_bytes_to_read =
+ for (size_t i = 0; i < sh_num;) {
+ const size_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
+ const size_t num_bytes_to_read =
(sizeof(buf) > num_bytes_left) ? num_bytes_left : sizeof(buf);
const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
sh_offset + i * sizeof(buf[0]));
- SAFE_ASSERT(len % sizeof(buf[0]) == 0);
- const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+ if (len == -1) {
+ return false;
+ }
+ SAFE_ASSERT(static_cast<size_t>(len) % sizeof(buf[0]) == 0);
+ const size_t num_headers_in_buf = static_cast<size_t>(len) / sizeof(buf[0]);
SAFE_ASSERT(num_headers_in_buf <= sizeof(buf) / sizeof(buf[0]));
- for (int j = 0; j < num_headers_in_buf; ++j) {
+ for (size_t j = 0; j < num_headers_in_buf; ++j) {
if (buf[j].sh_type == type) {
*out = buf[j];
return true;
@@ -233,15 +252,16 @@
}
ElfW(Shdr) shstrtab;
- off_t shstrtab_offset = (elf_header.e_shoff +
- elf_header.e_shentsize * elf_header.e_shstrndx);
+ size_t shstrtab_offset =
+ (elf_header.e_shoff + static_cast<size_t>(elf_header.e_shentsize) *
+ static_cast<size_t>(elf_header.e_shstrndx));
if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
return false;
}
- for (int i = 0; i < elf_header.e_shnum; ++i) {
- off_t section_header_offset = (elf_header.e_shoff +
- elf_header.e_shentsize * i);
+ for (size_t i = 0; i < elf_header.e_shnum; ++i) {
+ size_t section_header_offset =
+ (elf_header.e_shoff + elf_header.e_shentsize * i);
if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
return false;
}
@@ -252,11 +272,11 @@
// No point in even trying.
return false;
}
- off_t name_offset = shstrtab.sh_offset + out->sh_name;
+ size_t name_offset = shstrtab.sh_offset + out->sh_name;
ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
if (n_read == -1) {
return false;
- } else if (n_read != name_len) {
+ } else if (static_cast<size_t>(n_read) != name_len) {
// Short read -- name could be at end of file.
continue;
}
@@ -273,33 +293,38 @@
// to out. Otherwise, return false.
// To keep stack consumption low, we would like this function to not get
// inlined.
-static ATTRIBUTE_NOINLINE bool
-FindSymbol(uint64_t pc, const int fd, char *out, int out_size,
- uint64_t symbol_offset, const ElfW(Shdr) *strtab,
- const ElfW(Shdr) *symtab) {
+static ATTRIBUTE_NOINLINE bool FindSymbol(uint64_t pc,
+ const int fd,
+ char* out,
+ size_t out_size,
+ uint64_t symbol_offset,
+ const ElfW(Shdr) * strtab,
+ const ElfW(Shdr) * symtab) {
if (symtab == NULL) {
return false;
}
- const int num_symbols = symtab->sh_size / symtab->sh_entsize;
- for (int i = 0; i < num_symbols;) {
- off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+ const size_t num_symbols = symtab->sh_size / symtab->sh_entsize;
+ for (unsigned i = 0; i < num_symbols;) {
+ size_t offset = symtab->sh_offset + i * symtab->sh_entsize;
// If we are reading Elf64_Sym's, we want to limit this array to
// 32 elements (to keep stack consumption low), otherwise we can
// have a 64 element Elf32_Sym array.
-#if __WORDSIZE == 64
-#define NUM_SYMBOLS 32
+#if defined(__WORDSIZE) && __WORDSIZE == 64
+ const size_t NUM_SYMBOLS = 32U;
#else
-#define NUM_SYMBOLS 64
+ const size_t NUM_SYMBOLS = 64U;
#endif
// Read at most NUM_SYMBOLS symbols at once to save read() calls.
ElfW(Sym) buf[NUM_SYMBOLS];
- const ssize_t len = ReadFromOffset(fd, &buf, sizeof(buf), offset);
- SAFE_ASSERT(len % sizeof(buf[0]) == 0);
- const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
- SAFE_ASSERT(num_symbols_in_buf <= sizeof(buf) / sizeof(buf[0]));
- for (int j = 0; j < num_symbols_in_buf; ++j) {
+ size_t num_symbols_to_read = std::min(NUM_SYMBOLS, num_symbols - i);
+ const ssize_t len =
+ ReadFromOffset(fd, &buf, sizeof(buf[0]) * num_symbols_to_read, offset);
+ SAFE_ASSERT(static_cast<size_t>(len) % sizeof(buf[0]) == 0);
+ const size_t num_symbols_in_buf = static_cast<size_t>(len) / sizeof(buf[0]);
+ SAFE_ASSERT(num_symbols_in_buf <= num_symbols_to_read);
+ for (unsigned j = 0; j < num_symbols_in_buf; ++j) {
const ElfW(Sym)& symbol = buf[j];
uint64_t start_address = symbol.st_value;
start_address += symbol_offset;
@@ -310,6 +335,7 @@
ssize_t len1 = ReadFromOffset(fd, out, out_size,
strtab->sh_offset + symbol.st_name);
if (len1 <= 0 || memchr(out, '\0', out_size) == NULL) {
+ memset(out, 0, out_size);
return false;
}
return true; // Obtained the symbol name.
@@ -327,69 +353,44 @@
static bool GetSymbolFromObjectFile(const int fd,
uint64_t pc,
char* out,
- int out_size,
- uint64_t map_start_address) {
+ size_t out_size,
+ uint64_t base_address) {
// Read the ELF header.
ElfW(Ehdr) elf_header;
if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
return false;
}
- uint64_t symbol_offset = 0;
- if (elf_header.e_type == ET_DYN) { // DSO needs offset adjustment.
- symbol_offset = map_start_address;
- }
-
ElfW(Shdr) symtab, strtab;
// Consult a regular symbol table first.
- if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
- SHT_SYMTAB, &symtab)) {
- return false;
- }
- if (!ReadFromOffsetExact(
- fd, &strtab, sizeof(strtab),
- elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
- return false;
- }
- if (FindSymbol(pc, fd, out, out_size, symbol_offset, &strtab, &symtab)) {
- return true; // Found the symbol in a regular symbol table.
+ if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+ SHT_SYMTAB, &symtab)) {
+ if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+ symtab.sh_link * sizeof(symtab))) {
+ return false;
+ }
+ if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
+ return true; // Found the symbol in a regular symbol table.
+ }
}
// If the symbol is not found, then consult a dynamic symbol table.
- if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
- SHT_DYNSYM, &symtab)) {
- return false;
- }
- if (!ReadFromOffsetExact(
- fd, &strtab, sizeof(strtab),
- elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
- return false;
- }
- if (FindSymbol(pc, fd, out, out_size, symbol_offset, &strtab, &symtab)) {
- return true; // Found the symbol in a dynamic symbol table.
+ if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+ SHT_DYNSYM, &symtab)) {
+ if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+ symtab.sh_link * sizeof(symtab))) {
+ return false;
+ }
+ if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
+ return true; // Found the symbol in a dynamic symbol table.
+ }
}
return false;
}
namespace {
-// Thin wrapper around a file descriptor so that the file descriptor
-// gets closed for sure.
-struct FileDescriptor {
- const int fd_;
- explicit FileDescriptor(int fd) : fd_(fd) {}
- ~FileDescriptor() {
- if (fd_ >= 0) {
- NO_INTR(close(fd_));
- }
- }
- int get() { return fd_; }
-
- private:
- explicit FileDescriptor(const FileDescriptor&);
- void operator=(const FileDescriptor&);
-};
// Helper class for reading lines from file.
//
@@ -398,9 +399,14 @@
// and snprintf().
class LineReader {
public:
- explicit LineReader(int fd, char *buf, int buf_len) : fd_(fd),
- buf_(buf), buf_len_(buf_len), bol_(buf), eol_(buf), eod_(buf) {
- }
+ explicit LineReader(int fd, char* buf, size_t buf_len, size_t offset)
+ : fd_(fd),
+ buf_(buf),
+ buf_len_(buf_len),
+ offset_(offset),
+ bol_(buf),
+ eol_(buf),
+ eod_(buf) {}
// Read '\n'-terminated line from file. On success, modify "bol"
// and "eol", then return true. Otherwise, return false.
@@ -409,27 +415,29 @@
// dropped. It's an intentional behavior to make the code simple.
bool ReadLine(const char **bol, const char **eol) {
if (BufferIsEmpty()) { // First time.
- const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_);
+ const ssize_t num_bytes = ReadFromOffset(fd_, buf_, buf_len_, offset_);
if (num_bytes <= 0) { // EOF or error.
return false;
}
+ offset_ += static_cast<size_t>(num_bytes);
eod_ = buf_ + num_bytes;
bol_ = buf_;
} else {
bol_ = eol_ + 1; // Advance to the next line in the buffer.
SAFE_ASSERT(bol_ <= eod_); // "bol_" can point to "eod_".
if (!HasCompleteLine()) {
- const int incomplete_line_length = eod_ - bol_;
+ const size_t incomplete_line_length = static_cast<size_t>(eod_ - bol_);
// Move the trailing incomplete line to the beginning.
memmove(buf_, bol_, incomplete_line_length);
// Read text from file and append it.
char * const append_pos = buf_ + incomplete_line_length;
- const int capacity_left = buf_len_ - incomplete_line_length;
- const ssize_t num_bytes = ReadPersistent(fd_, append_pos,
- capacity_left);
+ const size_t capacity_left = buf_len_ - incomplete_line_length;
+ const ssize_t num_bytes =
+ ReadFromOffset(fd_, append_pos, capacity_left, offset_);
if (num_bytes <= 0) { // EOF or error.
return false;
}
+ offset_ += static_cast<size_t>(num_bytes);
eod_ = append_pos + num_bytes;
bol_ = buf_;
}
@@ -456,11 +464,12 @@
}
private:
- explicit LineReader(const LineReader&);
+ LineReader(const LineReader&);
void operator=(const LineReader&);
char *FindLineFeed() {
- return reinterpret_cast<char*>(memchr(bol_, '\n', eod_ - bol_));
+ return reinterpret_cast<char*>(
+ memchr(bol_, '\n', static_cast<size_t>(eod_ - bol_)));
}
bool BufferIsEmpty() {
@@ -473,7 +482,8 @@
const int fd_;
char * const buf_;
- const int buf_len_;
+ const size_t buf_len_;
+ size_t offset_;
char *bol_;
char *eol_;
const char *eod_; // End of data in "buf_".
@@ -489,7 +499,8 @@
int ch = *p;
if ((ch >= '0' && ch <= '9') ||
(ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) {
- *hex = (*hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
+ *hex = (*hex << 4U) |
+ (ch < 'A' ? static_cast<uint64_t>(ch - '0') : (ch & 0xF) + 9U);
} else { // Encountered the first non-hex character.
break;
}
@@ -509,16 +520,14 @@
}
#endif
-// Search for the object file (from /proc/self/maps) that contains
-// the specified pc. If found, open this file and return the file handle,
-// and also set start_address to the start address of where this object
-// file is mapped to in memory. Otherwise, return -1.
-static ATTRIBUTE_NOINLINE int OpenObjectFileContainingPcAndGetStartAddress(
+static int OpenObjectFileContainingPcAndGetStartAddressNoHook(
uint64_t pc,
- uint64_t& start_address) {
+ uint64_t& start_address,
+ uint64_t& base_address,
+ char* out_file_name,
+ size_t out_file_name_size) {
int object_fd;
- // Open /proc/self/maps.
int maps_fd;
NO_INTR(maps_fd = open("/proc/self/maps", O_RDONLY));
FileDescriptor wrapped_maps_fd(maps_fd);
@@ -526,11 +535,20 @@
return -1;
}
+ int mem_fd;
+ NO_INTR(mem_fd = open("/proc/self/mem", O_RDONLY));
+ FileDescriptor wrapped_mem_fd(mem_fd);
+ if (wrapped_mem_fd.get() < 0) {
+ return -1;
+ }
+
// Iterate over maps and look for the map containing the pc. Then
// look into the symbol tables inside.
char buf[1024]; // Big enough for line of sane /proc/self/maps
- LineReader reader(wrapped_maps_fd.get(), buf, sizeof(buf));
+ unsigned num_maps = 0;
+ LineReader reader(wrapped_maps_fd.get(), buf, sizeof(buf), 0);
while (true) {
+ num_maps++;
const char *cursor;
const char *eol;
if (!reader.ReadLine(&cursor, &eol)) { // EOF or malformed line.
@@ -559,11 +577,6 @@
}
++cursor; // Skip ' '.
- // Check start and end addresses.
- if (!(start_address <= pc && pc < end_address)) {
- continue; // We skip this map. PC isn't in this map.
- }
-
// Read flags. Skip flags until we encounter a space or eol.
const char * const flags_start = cursor;
while (cursor < eol && *cursor != ' ') {
@@ -574,20 +587,71 @@
return -1; // Malformed line.
}
- // Check flags. We are only interested in "r-x" maps.
- if (memcmp(flags_start, "r-x", 3) != 0) { // Not a "r-x" map.
+ // Determine the base address by reading ELF headers in process memory.
+ ElfW(Ehdr) ehdr;
+ // Skip non-readable maps.
+ if (flags_start[0] == 'r' &&
+ ReadFromOffsetExact(mem_fd, &ehdr, sizeof(ElfW(Ehdr)), start_address) &&
+ memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
+ switch (ehdr.e_type) {
+ case ET_EXEC:
+ base_address = 0;
+ break;
+ case ET_DYN:
+ // Find the segment containing file offset 0. This will correspond
+ // to the ELF header that we just read. Normally this will have
+ // virtual address 0, but this is not guaranteed. We must subtract
+ // the virtual address from the address where the ELF header was
+ // mapped to get the base address.
+ //
+ // If we fail to find a segment for file offset 0, use the address
+ // of the ELF header as the base address.
+ base_address = start_address;
+ for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
+ ElfW(Phdr) phdr;
+ if (ReadFromOffsetExact(
+ mem_fd, &phdr, sizeof(phdr),
+ start_address + ehdr.e_phoff + i * sizeof(phdr)) &&
+ phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
+ base_address = start_address - phdr.p_vaddr;
+ break;
+ }
+ }
+ break;
+ default:
+ // ET_REL or ET_CORE. These aren't directly executable, so they don't
+ // affect the base address.
+ break;
+ }
+ }
+
+ // Check start and end addresses.
+ if (!(start_address <= pc && pc < end_address)) {
+ continue; // We skip this map. PC isn't in this map.
+ }
+
+ // Check flags. We are only interested in "r*x" maps.
+ if (flags_start[0] != 'r' || flags_start[2] != 'x') {
continue; // We skip this map.
}
++cursor; // Skip ' '.
- // Skip to file name. "cursor" now points to file offset. We need to
- // skip at least three spaces for file offset, dev, and inode.
+ // Read file offset.
+ uint64_t file_offset;
+ cursor = GetHex(cursor, eol, &file_offset);
+ if (cursor == eol || *cursor != ' ') {
+ return -1; // Malformed line.
+ }
+ ++cursor; // Skip ' '.
+
+ // Skip to file name. "cursor" now points to dev. We need to
+ // skip at least two spaces for dev and inode.
int num_spaces = 0;
while (cursor < eol) {
if (*cursor == ' ') {
++num_spaces;
- } else if (num_spaces >= 3) {
- // The first non-space character after skipping three spaces
+ } else if (num_spaces >= 2) {
+ // The first non-space character after skipping two spaces
// is the beginning of the file name.
break;
}
@@ -600,12 +664,110 @@
// Finally, "cursor" now points to file name of our interest.
NO_INTR(object_fd = open(cursor, O_RDONLY));
if (object_fd < 0) {
+ // Failed to open object file. Copy the object file name to
+ // |out_file_name|.
+ strncpy(out_file_name, cursor, out_file_name_size);
+ // Making sure |out_file_name| is always null-terminated.
+ out_file_name[out_file_name_size - 1] = '\0';
return -1;
}
return object_fd;
}
}
+int OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
+ uint64_t& start_address,
+ uint64_t& base_address,
+ char* out_file_name,
+ size_t out_file_name_size) {
+ if (g_symbolize_open_object_file_callback) {
+ return g_symbolize_open_object_file_callback(
+ pc, start_address, base_address, out_file_name, out_file_name_size);
+ }
+ return OpenObjectFileContainingPcAndGetStartAddressNoHook(
+ pc, start_address, base_address, out_file_name, out_file_name_size);
+}
+
+// POSIX doesn't define any async-signal safe function for converting
+// an integer to ASCII. We'll have to define our own version.
+// itoa_r() converts an (unsigned) integer to ASCII. It returns "buf" if the
+// conversion was successful, or NULL otherwise. It never writes more than "sz"
+// bytes. Output will be truncated as needed, and a NUL character is always
+// appended.
+// NOTE: code from sandbox/linux/seccomp-bpf/demo.cc.
+static char* itoa_r(uintptr_t i,
+ char* buf,
+ size_t sz,
+ unsigned base,
+ size_t padding) {
+ // Make sure we can write at least one NUL byte.
+ size_t n = 1;
+ if (n > sz) {
+ return NULL;
+ }
+
+ if (base < 2 || base > 16) {
+ buf[0] = '\000';
+ return NULL;
+ }
+
+ char *start = buf;
+
+ // Loop until we have converted the entire number. Output at least one
+ // character (i.e. '0').
+ char *ptr = start;
+ do {
+ // Make sure there is still enough space left in our output buffer.
+ if (++n > sz) {
+ buf[0] = '\000';
+ return NULL;
+ }
+
+ // Output the next digit.
+ *ptr++ = "0123456789abcdef"[i % base];
+ i /= base;
+
+ if (padding > 0) {
+ padding--;
+ }
+ } while (i > 0 || padding > 0);
+
+ // Terminate the output with a NUL character.
+ *ptr = '\000';
+
+ // Conversion to ASCII actually resulted in the digits being in reverse
+ // order. We can't easily generate them in forward order, as we can't tell
+ // the number of characters needed until we are done converting.
+ // So, now, we reverse the string (except for the possible "-" sign).
+ while (--ptr > start) {
+ char ch = *ptr;
+ *ptr = *start;
+ *start++ = ch;
+ }
+ return buf;
+}
+
+// Safely appends string |source| to string |dest|. Never writes past the
+// buffer size |dest_size| and guarantees that |dest| is null-terminated.
+static void SafeAppendString(const char* source, char* dest, size_t dest_size) {
+ size_t dest_string_length = strlen(dest);
+ SAFE_ASSERT(dest_string_length < dest_size);
+ dest += dest_string_length;
+ dest_size -= dest_string_length;
+ strncpy(dest, source, dest_size);
+ // Making sure |dest| is always null-terminated.
+ dest[dest_size - 1] = '\0';
+}
+
+// Converts a 64-bit value into a hex string, and safely appends it to |dest|.
+// Never writes past the buffer size |dest_size| and guarantees that |dest| is
+// null-terminated.
+static void SafeAppendHexNumber(uint64_t value, char* dest, size_t dest_size) {
+ // 64-bit numbers in hex can have up to 16 digits.
+ char buf[17] = {'\0'};
+ SafeAppendString(itoa_r(value, buf, sizeof(buf), 16, 0), dest, dest_size);
+}
+
// The implementation of our symbolization routine. If it
// successfully finds the symbol containing "pc" and obtains the
// symbol name, returns true and write the symbol name to "out".
@@ -614,11 +776,18 @@
// and "out" is used as its output.
// To keep stack consumption low, we would like this function to not
// get inlined.
-static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
- int out_size) {
+static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void* pc,
+ char* out,
+ size_t out_size) {
uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
uint64_t start_address = 0;
+ uint64_t base_address = 0;
+ if (out_size < 1) {
+ return false;
+ }
+ out[0] = '\0';
+ SafeAppendString("(", out, out_size);
#if SB_IS(EVERGREEN_COMPATIBLE)
char* file_name = NULL;
EvergreenInfo evergreen_info;
@@ -626,23 +795,44 @@
if (IS_EVERGREEN_ADDRESS(pc, evergreen_info)) {
file_name = evergreen_info.file_path_buf;
start_address = evergreen_info.base_address;
+ base_address = evergreen_info.base_address;
}
}
int object_fd = -1;
if (file_name != NULL) {
object_fd = OpenFile(file_name);
} else {
- object_fd =
- OpenObjectFileContainingPcAndGetStartAddress(pc0, start_address);
+ object_fd = OpenObjectFileContainingPcAndGetStartAddress(
+ pc0, start_address, base_address, out + 1, out_size - 1);
+
}
#else
- int object_fd =
- OpenObjectFileContainingPcAndGetStartAddress(pc0, start_address);
+ int object_fd = OpenObjectFileContainingPcAndGetStartAddress(
+ pc0, start_address, base_address, out + 1, out_size - 1);
#endif
- if (object_fd == -1) {
+
+ FileDescriptor wrapped_object_fd(object_fd);
+
+#if defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
+ {
+#else
+ // Check whether a file name was returned.
+ if (object_fd < 0) {
+#endif
+ if (out[1]) {
+ // The object file containing PC was determined successfully; however, the
+ // object file was not opened successfully. This is still considered
+ // success because the object file name and offset are known and tools
+ // like asan_symbolize.py can be used for the symbolization.
+ out[out_size - 1] = '\0'; // Making sure |out| is always null-terminated.
+ SafeAppendString("+0x", out, out_size);
+ SafeAppendHexNumber(pc0 - base_address, out, out_size);
+ SafeAppendString(")", out, out_size);
+ return true;
+ }
+ // Failed to determine the object file containing PC. Bail out.
return false;
}
- FileDescriptor wrapped_object_fd(object_fd);
int elf_type = FileGetElfType(wrapped_object_fd.get());
if (elf_type == -1) {
return false;
@@ -651,17 +841,28 @@
// Run the call back if it's installed.
// Note: relocation (and much of the rest of this code) will be
// wrong for prelinked shared libraries and PIE executables.
- uint64 relocation = (elf_type == ET_DYN) ? start_address : 0;
+ uint64_t relocation = (elf_type == ET_DYN) ? start_address : 0;
int num_bytes_written = g_symbolize_callback(wrapped_object_fd.get(),
pc, out, out_size,
relocation);
if (num_bytes_written > 0) {
- out += num_bytes_written;
- out_size -= num_bytes_written;
+ out += static_cast<size_t>(num_bytes_written);
+ out_size -= static_cast<size_t>(num_bytes_written);
}
}
- if (!GetSymbolFromObjectFile(wrapped_object_fd.get(), pc0, out, out_size,
- start_address)) {
+ if (!GetSymbolFromObjectFile(wrapped_object_fd.get(), pc0,
+ out, out_size, base_address)) {
+ if (out[1] && !g_symbolize_callback) {
+ // The object file containing PC was opened successfully; however, the
+ // symbol was not found. The object may have been stripped. This is still
+ // considered success because the object file name and offset are known
+ // and tools like asan_symbolize.py can be used for the symbolization.
+ out[out_size - 1] = '\0'; // Making sure |out| is always null-terminated.
+ SafeAppendString("+0x", out, out_size);
+ SafeAppendHexNumber(pc0 - base_address, out, out_size);
+ SafeAppendString(")", out, out_size);
+ return true;
+ }
return false;
}
@@ -672,22 +873,26 @@
_END_GOOGLE_NAMESPACE_
-#elif defined(OS_MACOSX) && defined(HAVE_DLADDR)
+#elif (defined(GLOG_OS_MACOSX) || defined(GLOG_OS_EMSCRIPTEN)) && \
+ defined(HAVE_DLADDR)
#include <dlfcn.h>
-#include <string.h>
+#include <cstring>
_START_GOOGLE_NAMESPACE_
-static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
- int out_size) {
+static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void* pc,
+ char* out,
+ size_t out_size) {
Dl_info info;
if (dladdr(pc, &info)) {
- if (strlen(info.dli_sname) < out_size) {
- strcpy(out, info.dli_sname);
- // Symbolization succeeded. Now we try to demangle the symbol.
- DemangleInplace(out, out_size);
- return true;
+ if (info.dli_sname) {
+ if (strlen(info.dli_sname) < out_size) {
+ strcpy(out, info.dli_sname);
+ // Symbolization succeeded. Now we try to demangle the symbol.
+ DemangleInplace(out, out_size);
+ return true;
+ }
}
}
return false;
@@ -695,14 +900,79 @@
_END_GOOGLE_NAMESPACE_
+#elif defined(GLOG_OS_WINDOWS) || defined(GLOG_OS_CYGWIN)
+
+#include <windows.h>
+#include <dbghelp.h>
+
+#ifdef _MSC_VER
+#pragma comment(lib, "dbghelp")
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+class SymInitializer {
+public:
+ HANDLE process;
+ bool ready;
+ SymInitializer() : process(NULL), ready(false) {
+ // Initialize the symbol handler.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms680344(v=vs.85).aspx
+ process = GetCurrentProcess();
+ // Defer symbol loading.
+ // We do not request undecorated symbols with SYMOPT_UNDNAME
+ // because the mangling library calls UnDecorateSymbolName.
+ SymSetOptions(SYMOPT_DEFERRED_LOADS);
+ if (SymInitialize(process, NULL, true)) {
+ ready = true;
+ }
+ }
+ ~SymInitializer() {
+ SymCleanup(process);
+ // We do not need to close `HANDLE process` because it's a "pseudo handle."
+ }
+private:
+ SymInitializer(const SymInitializer&);
+ SymInitializer& operator=(const SymInitializer&);
+};
+
+static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void* pc,
+ char* out,
+ size_t out_size) {
+ const static SymInitializer symInitializer;
+ if (!symInitializer.ready) {
+ return false;
+ }
+ // Resolve symbol information from address.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms680578(v=vs.85).aspx
+ char buf[sizeof(SYMBOL_INFO) + MAX_SYM_NAME];
+ SYMBOL_INFO *symbol = reinterpret_cast<SYMBOL_INFO *>(buf);
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = MAX_SYM_NAME;
+ // We use the ANSI version to ensure the string type is always `char *`.
+ // This could break if a symbol has Unicode in it.
+ BOOL ret = SymFromAddr(symInitializer.process,
+ reinterpret_cast<DWORD64>(pc), 0, symbol);
+ if (ret == 1 && static_cast<ssize_t>(symbol->NameLen) < out_size) {
+ // `NameLen` does not include the null terminating character.
+ strncpy(out, symbol->Name, static_cast<size_t>(symbol->NameLen) + 1);
+ out[static_cast<size_t>(symbol->NameLen)] = '\0';
+ // Symbolization succeeded. Now we try to demangle the symbol.
+ DemangleInplace(out, out_size);
+ return true;
+ }
+ return false;
+}
+
+_END_GOOGLE_NAMESPACE_
+
#else
# error BUG: HAVE_SYMBOLIZE was wrongly set
#endif
_START_GOOGLE_NAMESPACE_
-bool Symbolize(void *pc, char *out, int out_size) {
- SAFE_ASSERT(out_size >= 0);
+bool Symbolize(void* pc, char* out, size_t out_size) {
return SymbolizeAndDemangle(pc, out, out_size);
}
@@ -710,14 +980,14 @@
#else /* HAVE_SYMBOLIZE */
-#include <assert.h>
+#include <cassert>
#include "config.h"
_START_GOOGLE_NAMESPACE_
// TODO: Support other environments.
-bool Symbolize(void *pc, char *out, int out_size) {
+bool Symbolize(void* /*pc*/, char* /*out*/, size_t /*out_size*/) {
assert(0);
return false;
}
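
The hunks above replace the old "first r-x map containing pc" heuristic with an explicit base-address computation read from the mapped ELF headers, and that base is what the new "(file+0xoffset)" fallback prints when a symbol cannot be resolved. Below is a minimal sketch of the same ET_EXEC/ET_DYN logic, reading the headers through the mapping itself rather than via /proc/self/mem; the function name and the in-process reads are illustrative assumptions, not part of the patch.

#include <elf.h>
#include <link.h>  // ElfW()

#include <cstdint>

// Illustrative only: recover the module base the way the new
// OpenObjectFileContainingPcAndGetStartAddressNoHook does, assuming the ELF
// header is readable at the start of the mapping.
static uintptr_t ComputeBaseAddress(uintptr_t start_address) {
  const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(start_address);
  if (ehdr->e_type == ET_EXEC) {
    return 0;  // Non-PIE executable: symbols already hold absolute addresses.
  }
  if (ehdr->e_type != ET_DYN) {
    // ET_REL/ET_CORE: the patch leaves base_address untouched for these;
    // returning 0 here is a simplification for the sketch.
    return 0;
  }
  // For a DSO/PIE, find the PT_LOAD segment that maps file offset 0; its
  // p_vaddr says how far the ELF header sits above the true base.
  const ElfW(Phdr)* phdr =
      reinterpret_cast<const ElfW(Phdr)*>(start_address + ehdr->e_phoff);
  for (unsigned i = 0; i != ehdr->e_phnum; ++i) {
    if (phdr[i].p_type == PT_LOAD && phdr[i].p_offset == 0) {
      return start_address - phdr[i].p_vaddr;
    }
  }
  return start_address;  // Fallback, as in the patch: use the header address.
}
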
diff --git a/base/third_party/symbolize/symbolize.h b/base/third_party/symbolize/symbolize.h
index 609af8f..588f4ab 100644
--- a/base/third_party/symbolize/symbolize.h
+++ b/base/third_party/symbolize/symbolize.h
@@ -51,13 +51,15 @@
// malloc() and other unsafe operations. It should be both
// thread-safe and async-signal-safe.
-// Note for Cobalt: Cobalt Starboard depends on the old version of Symbolize so
-// this file is from m27 Chromium. There are no Cobalt-introduced changes in
-// this file.
-
#ifndef BASE_SYMBOLIZE_H_
#define BASE_SYMBOLIZE_H_
+#if defined (STARBOARD)
+#define GLOG_EXPORT
+#endif
+
+#include <sys/types.h> // for ssize_t
+
#include "utilities.h"
#include "config.h"
#include "glog/logging.h"
@@ -98,17 +100,58 @@
_START_GOOGLE_NAMESPACE_
+// Read up to "count" bytes from "offset" in the file pointed by file
+// descriptor "fd" into the buffer starting at "buf" while handling short reads
+// and EINTR. On success, return the number of bytes read. Otherwise, return
+// -1.
+ssize_t ReadFromOffset(const int fd,
+ void* buf,
+ const size_t count,
+ const size_t offset);
+
// Gets the section header for the given name, if it exists. Returns true on
// success. Otherwise, returns false.
bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
ElfW(Shdr) *out);
+// Searches for the object file (from /proc/self/maps) that contains
+// the specified pc. If found, sets |start_address| to the start address
+// of where this object file is mapped in memory, sets the module base
+// address into |base_address|, copies the object file name into
+// |out_file_name|, and attempts to open the object file. If the object
+// file is opened successfully, returns the file descriptor. Otherwise,
+// returns -1. |out_file_name_size| is the size of the file name buffer
+// (including the null-terminator).
+ATTRIBUTE_NOINLINE int OpenObjectFileContainingPcAndGetStartAddress(
+ uint64_t pc,
+ uint64_t& start_address,
+ uint64_t& base_address,
+ char* out_file_name,
+ size_t out_file_name_size);
+
_END_GOOGLE_NAMESPACE_
-#endif /* __ELF__ */
+#endif /* __ELF__ */
_START_GOOGLE_NAMESPACE_
+// Thin wrapper around a file descriptor so that the file descriptor
+// gets closed for sure.
+struct FileDescriptor {
+ const int fd_;
+ explicit FileDescriptor(int fd) : fd_(fd) {}
+ ~FileDescriptor();
+ int get() { return fd_; }
+
+ private:
+ FileDescriptor(const FileDescriptor&);
+ void operator=(const FileDescriptor&);
+};
+
+// Restrictions on the callbacks that follow:
+// - The callbacks must not use heaps but only use stacks.
+// - The callbacks must be async-signal-safe.
+
// Installs a callback function, which will be called right before a symbol name
// is printed. The callback is intended to be used for showing a file name and a
// line number preceding a symbol name.
@@ -120,9 +163,28 @@
void* pc,
char* out,
size_t out_size,
- uint64 relocation);
+ uint64_t relocation);
+GLOG_EXPORT
void InstallSymbolizeCallback(SymbolizeCallback callback);
+// Installs a callback function, which will be called instead of
+// OpenObjectFileContainingPcAndGetStartAddress. The callback is expected
+// to search for the object file (from /proc/self/maps) that contains
+// the specified pc. If found, sets |start_address| to the start address
+// of where this object file is mapped in memory, sets the module base
+// address into |base_address|, copies the object file name into
+// |out_file_name|, and attempts to open the object file. If the object
+// file is opened successfully, returns the file descriptor. Otherwise,
+// returns -1. |out_file_name_size| is the size of the file name buffer
+// (including the null-terminator).
+typedef int (*SymbolizeOpenObjectFileCallback)(uint64_t pc,
+ uint64_t& start_address,
+ uint64_t& base_address,
+ char* out_file_name,
+ size_t out_file_name_size);
+void InstallSymbolizeOpenObjectFileCallback(
+ SymbolizeOpenObjectFileCallback callback);
+
_END_GOOGLE_NAMESPACE_
#endif
@@ -133,7 +195,7 @@
// symbol name to "out". The symbol name is demangled if possible
// (supports symbols generated by GCC 3.x or newer). Otherwise,
// returns false.
-bool Symbolize(void *pc, char *out, int out_size);
+GLOG_EXPORT bool Symbolize(void* pc, char* out, size_t out_size);
_END_GOOGLE_NAMESPACE_
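
For reference, a hedged sketch of how a caller might install the open-object-file hook declared above. The callback body and file path are placeholders, and the google:: qualifier assumes the GOOGLE_NAMESPACE macros expand to namespace google as in upstream glog; per the restrictions noted in the header, the hook stays stack-only and async-signal-safe.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>

#include "base/third_party/symbolize/symbolize.h"

// Hypothetical hook: everything except the installed API is illustrative.
static int OpenMainModule(uint64_t /*pc*/,
                          uint64_t& start_address,
                          uint64_t& base_address,
                          char* out_file_name,
                          size_t out_file_name_size) {
  // In a real hook these values would come from the loader's bookkeeping
  // (e.g. the EvergreenInfo consulted elsewhere in this patch).
  start_address = 0;
  base_address = 0;
  strncpy(out_file_name, "/proc/self/exe", out_file_name_size);
  out_file_name[out_file_name_size - 1] = '\0';
  return open("/proc/self/exe", O_RDONLY);  // -1 on failure, like the default.
}

void InstallHook() {
  google::InstallSymbolizeOpenObjectFileCallback(&OpenMainModule);
}
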
diff --git a/base/third_party/symbolize/utilities.h b/base/third_party/symbolize/utilities.h
index 0bed526..bb206a8 100644
--- a/base/third_party/symbolize/utilities.h
+++ b/base/third_party/symbolize/utilities.h
@@ -1,11 +1,47 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Shinichiro Hamaji
+//
+// Define utilities for glog internal usage.
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-typedef uint64_t uint64;
-#define HAVE_SYMBOLIZE 1
-#define ATTRIBUTE_NOINLINE __attribute__ ((noinline))
+#ifndef UTILITIES_H__
+#define UTILITIES_H__
+
+#ifdef HAVE___ATTRIBUTE__
+#define ATTRIBUTE_NOINLINE __attribute__((noinline))
+#define HAVE_ATTRIBUTE_NOINLINE
+#elif defined(GLOG_OS_WINDOWS)
+#define ATTRIBUTE_NOINLINE __declspec(noinline)
+#define HAVE_ATTRIBUTE_NOINLINE
+#else
+#define ATTRIBUTE_NOINLINE
+#endif
+
+#endif // UTILITIES_H__
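
A small illustrative use of the ATTRIBUTE_NOINLINE macro defined above (GCC/Clang branch shown; the helper name and builtin are assumptions, not part of the patch): keeping the annotated frame out of its caller preserves a distinct return address for the stack-trace and symbolization code.

#include "utilities.h"

// Illustrative helper: without ATTRIBUTE_NOINLINE the compiler may fold this
// frame into its caller and it vanishes from symbolized traces.
ATTRIBUTE_NOINLINE void* CurrentReturnAddress() {
  return __builtin_return_address(0);  // GCC/Clang builtin.
}
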
diff --git a/base/threading/thread_task_runner_handle.cc b/base/threading/thread_task_runner_handle.cc
index 314b303..85b47f2 100644
--- a/base/threading/thread_task_runner_handle.cc
+++ b/base/threading/thread_task_runner_handle.cc
@@ -24,7 +24,7 @@
} // namespace
// static
-scoped_refptr<SingleThreadTaskRunner> ThreadTaskRunnerHandle::Get() {
+const scoped_refptr<SingleThreadTaskRunner>& ThreadTaskRunnerHandle::Get() {
ThreadTaskRunnerHandle* current = thread_task_runner_tls.Pointer()->Get();
CHECK(current) << "Error: This caller requires a single-threaded context "
"(i.e. the current task needs to run from a "
diff --git a/base/threading/thread_task_runner_handle.h b/base/threading/thread_task_runner_handle.h
index f6b71d7..fc66fec 100644
--- a/base/threading/thread_task_runner_handle.h
+++ b/base/threading/thread_task_runner_handle.h
@@ -22,7 +22,7 @@
class BASE_EXPORT ThreadTaskRunnerHandle {
public:
// Gets the SingleThreadTaskRunner for the current thread.
- static scoped_refptr<SingleThreadTaskRunner> Get();
+ static const scoped_refptr<SingleThreadTaskRunner>& Get();
// Returns true if the SingleThreadTaskRunner is already created for
// the current thread.
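
The change above lets hot call sites post through the returned reference without an AddRef/Release round-trip, while callers that keep the runner copy it explicitly. A sketch under those assumptions (DoWork and Consumer are hypothetical; header paths follow the Chromium base checked into this tree):

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/scoped_refptr.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"

void DoWork() {}

// Posting through the reference: no scoped_refptr copy is made.
void PostNow() {
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                base::BindOnce(&DoWork));
}

// Storing the runner still takes an explicit copy, as in audio_context.cc.
class Consumer {
 public:
  Consumer() : task_runner_(base::ThreadTaskRunnerHandle::Get()) {}

 private:
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};
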
diff --git a/build/config/win/visual_studio_version.gni b/build/config/win/visual_studio_version.gni
index 74828ec..13ca252 100644
--- a/build/config/win/visual_studio_version.gni
+++ b/build/config/win/visual_studio_version.gni
@@ -36,7 +36,12 @@
if (is_docker_build) {
_default_visual_studio_path = "C:/BuildTools"
} else {
- _default_visual_studio_path = "C:/Program Files (x86)/Microsoft Visual Studio/2017/Professional"
+ if (use_visual_studio_2022) {
+ _vis_std_year = "2022"
+ } else {
+ _vis_std_year = "2017"
+ }
+ _default_visual_studio_path = "C:/Program Files (x86)/Microsoft Visual Studio/$_vis_std_year/Professional"
}
declare_args() {
diff --git a/cobalt/BUILD.gn b/cobalt/BUILD.gn
index 2c221a3..cecfe49 100644
--- a/cobalt/BUILD.gn
+++ b/cobalt/BUILD.gn
@@ -38,6 +38,7 @@
"//cobalt/renderer/sandbox:renderer_sandbox",
"//cobalt/renderer/sandbox:scaling_text_sandbox",
"//cobalt/speech/sandbox:speech_sandbox",
+ "//cobalt/ui_navigation/scroll_engine:scroll_engine_tests",
"//cobalt/web:web_test",
"//cobalt/web_animations:web_animations_test",
"//cobalt/webdriver:webdriver_test",
diff --git a/cobalt/audio/audio_buffer_source_node.cc b/cobalt/audio/audio_buffer_source_node.cc
index 3d1153a..bcc8ba7 100644
--- a/cobalt/audio/audio_buffer_source_node.cc
+++ b/cobalt/audio/audio_buffer_source_node.cc
@@ -20,6 +20,7 @@
#include <memory>
#include <utility>
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/audio/audio_context.h"
#include "cobalt/audio/audio_helpers.h"
#include "cobalt/audio/audio_node_output.h"
@@ -35,7 +36,7 @@
AudioBufferSourceNode::AudioBufferSourceNode(
script::EnvironmentSettings* settings, AudioContext* context)
: AudioNode(settings, context),
- task_runner_(base::MessageLoop::current()->task_runner()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
state_(kNone),
read_index_(0),
buffer_source_added_(false),
diff --git a/cobalt/audio/audio_context.cc b/cobalt/audio/audio_context.cc
index 0e7ae9d..45f8568 100644
--- a/cobalt/audio/audio_context.cc
+++ b/cobalt/audio/audio_context.cc
@@ -18,6 +18,7 @@
#include <utility>
#include "base/callback.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/web/context.h"
#include "cobalt/web/environment_settings.h"
@@ -42,7 +43,7 @@
ALLOW_THIS_IN_INITIALIZER_LIST(
destination_(new AudioDestinationNode(settings, this))),
next_callback_id_(0),
- main_message_loop_(base::MessageLoop::current()->task_runner()) {
+ main_message_loop_(base::ThreadTaskRunnerHandle::Get()) {
DCHECK(main_message_loop_);
}
diff --git a/cobalt/base/fixed_size_lru_cache.h b/cobalt/base/fixed_size_lru_cache.h
index 341c6c0..a1d03d5 100644
--- a/cobalt/base/fixed_size_lru_cache.h
+++ b/cobalt/base/fixed_size_lru_cache.h
@@ -16,6 +16,7 @@
#include <algorithm>
#include <functional>
+#include <utility>
#include "base/basictypes.h"
#include "base/logging.h"
@@ -40,7 +41,7 @@
// Functor that checks if two items have keys that match (based on a template
// argument).
- struct KeyEqual : public std::unary_function<KeyEqual, bool> {
+ struct KeyEqual {
explicit KeyEqual(const Key& key) : to_check(key) {}
bool operator()(const value_type& i) const {
KeyCompare comparator;
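
The std::unary_function base dropped above only supplied argument_type/result_type typedefs that nothing here consumes, and the template is removed in C++17; a plain functor works unchanged with the <algorithm> calls. A minimal standalone sketch (KeyEquals and the pair element type are illustrative, not the cache's real value_type):

#include <algorithm>
#include <utility>
#include <vector>

// Illustrative functor: std::find_if needs only a callable taking the element
// type, so no std::unary_function base is required.
struct KeyEquals {
  explicit KeyEquals(int key) : key_(key) {}
  bool operator()(const std::pair<int, int>& item) const {
    return item.first == key_;
  }
  int key_;
};

bool Contains(const std::vector<std::pair<int, int>>& items, int key) {
  return std::find_if(items.begin(), items.end(), KeyEquals(key)) !=
         items.end();
}
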
diff --git a/cobalt/base/wrap_main_starboard.h b/cobalt/base/wrap_main_starboard.h
index 4d4bd74..db37f6c 100644
--- a/cobalt/base/wrap_main_starboard.h
+++ b/cobalt/base/wrap_main_starboard.h
@@ -53,14 +53,8 @@
DCHECK(!g_loop);
g_loop = new base::MessageLoopForUI();
g_loop->Start();
-#if SB_API_VERSION >= 13
preload_function(data->argument_count, data->argument_values, data->link,
base::Bind(&SbSystemRequestStop, 0), event->timestamp);
-#else // SB_API_VERSION >= 13
- preload_function(data->argument_count, data->argument_values, data->link,
- base::Bind(&SbSystemRequestStop, 0),
- SbTimeGetMonotonicNow());
-#endif // SB_API_VERSION >= 13
g_started = true;
break;
}
@@ -79,14 +73,8 @@
g_loop = new base::MessageLoopForUI();
g_loop->Start();
}
-#if SB_API_VERSION >= 13
start_function(data->argument_count, data->argument_values, data->link,
base::Bind(&SbSystemRequestStop, 0), event->timestamp);
-#else // SB_API_VERSION >= 13
- start_function(data->argument_count, data->argument_values, data->link,
- base::Bind(&SbSystemRequestStop, 0),
- SbTimeGetMonotonicNow());
-#endif // SB_API_VERSION >= 13
g_started = true;
break;
}
@@ -107,29 +95,18 @@
g_at_exit = NULL;
break;
}
-#if SB_API_VERSION >= 13
case kSbEventTypeBlur:
case kSbEventTypeFocus:
case kSbEventTypeConceal:
case kSbEventTypeReveal:
case kSbEventTypeFreeze:
case kSbEventTypeUnfreeze:
-#else
- case kSbEventTypePause:
- case kSbEventTypeUnpause:
- case kSbEventTypeSuspend:
- case kSbEventTypeResume:
-#endif // SB_API_VERSION >= 13
case kSbEventTypeInput:
case kSbEventTypeUser:
case kSbEventTypeLink:
case kSbEventTypeVerticalSync:
case kSbEventTypeScheduled:
-#if SB_API_VERSION >= 13
case kSbEventTypeAccessibilitySettingsChanged:
-#else
- case kSbEventTypeAccessiblitySettingsChanged:
-#endif // SB_API_VERSION >= 13
case kSbEventTypeLowMemory:
case kSbEventTypeWindowSizeChanged:
case kSbEventTypeOnScreenKeyboardShown:
@@ -138,18 +115,10 @@
case kSbEventTypeOnScreenKeyboardBlurred:
case kSbEventTypeOnScreenKeyboardSuggestionsUpdated:
case kSbEventTypeAccessibilityCaptionSettingsChanged:
-#if SB_API_VERSION >= 13
case kSbEventTypeAccessibilityTextToSpeechSettingsChanged:
-#else
- case kSbEventTypeAccessiblityTextToSpeechSettingsChanged:
-#endif // SB_API_VERSION >= 13
-#if SB_API_VERSION >= 13
case kSbEventTypeOsNetworkDisconnected:
case kSbEventTypeOsNetworkConnected:
-#endif
-#if SB_API_VERSION >= 13
case kSbEventDateTimeConfigurationChanged:
-#endif
event_function(event);
break;
}
diff --git a/cobalt/bindings/code_generator_cobalt.py b/cobalt/bindings/code_generator_cobalt.py
index e82d1eb..6756410 100644
--- a/cobalt/bindings/code_generator_cobalt.py
+++ b/cobalt/bindings/code_generator_cobalt.py
@@ -77,8 +77,7 @@
if (special in operation.specials and operation.arguments and
str(operation.arguments[0].idl_type) == 'unsigned long'))
assert len(special_operations) <= 1, (
- 'Multiple indexed %ss defined on interface: %s' %
- (special, interface.name))
+ f'Multiple indexed {special}s defined on interface: {interface.name}')
return special_operations[0] if special_operations else None
@@ -106,7 +105,7 @@
if (special in operation.specials and operation.arguments and
str(operation.arguments[0].idl_type) == 'DOMString'))
assert len(special_operations) <= 1, (
- 'Multiple named %ss defined on interface: %s' % (special, interface.name))
+ f'Multiple named {special}s defined on interface: {interface.name}')
return special_operations[0] if special_operations else None
@@ -186,9 +185,8 @@
__metaclass__ = abc.ABCMeta
def __init__(self, templates_dir, info_provider, cache_dir, output_dir):
- super(CodeGeneratorCobalt,
- self).__init__('CodeGeneratorCobalt', info_provider, cache_dir,
- output_dir)
+ super().__init__('CodeGeneratorCobalt', info_provider, cache_dir,
+ output_dir)
# CodeGeneratorBase inititalizes this with the v8 template path, so
# reinitialize it with cobalt's template path
@@ -225,7 +223,7 @@
if definition_name in definitions.enumerations:
return self.generate_enum_code(definition_name,
definitions.enumerations[definition_name])
- raise ValueError('%s is not in IDL definitions' % definition_name)
+ raise ValueError(f'{definition_name} is not in IDL definitions')
def generate_interface_code(self, definitions, interface_name, interface):
interface_info = self.info_provider.interfaces_info[interface_name]
@@ -326,7 +324,7 @@
return {
'fully_qualified_name':
- '%s::%s' % (namespace, dictionary_name),
+ f'{namespace}::{dictionary_name}',
'include':
self.path_builder.DictionaryHeaderIncludePath(dictionary_name),
'conditional':
@@ -338,7 +336,7 @@
def referenced_enum_context(self, enum_name):
namespace = '::'.join(self.path_builder.NamespaceComponents(enum_name))
return {
- 'fully_qualified_name': '%s::%s' % (namespace, enum_name),
+ 'fully_qualified_name': f'{namespace}::{enum_name}',
'include': self.path_builder.EnumHeaderIncludePath(enum_name),
'conditional': None,
'is_callback_interface': False,
@@ -381,7 +379,7 @@
impl_name = interface_info.get('implemented_as') or interface_name
referenced_classes.append({
'fully_qualified_name':
- '%s::%s' % (namespace, impl_name),
+ f'{namespace}::{impl_name}',
'include':
self.path_builder.ImplementationHeaderPath(interface_name),
'conditional':
@@ -392,8 +390,7 @@
if include_bindings_class:
referenced_classes.append({
'fully_qualified_name':
- '%s::%s' %
- (namespace, self.path_builder.BindingsClass(impl_name)),
+ f'{namespace}::{self.path_builder.BindingsClass(impl_name)}',
'include':
self.path_builder.BindingsHeaderIncludePath(interface_name),
'conditional':
@@ -442,7 +439,7 @@
referenced_interface_names.add(dictionary.parent)
parent_namespace = '::'.join(
self.path_builder.NamespaceComponents(dictionary.parent))
- context['parent'] = '%s::%s' % (parent_namespace, dictionary.parent)
+ context['parent'] = f'{parent_namespace}::{dictionary.parent}'
referenced_class_contexts = self.referenced_class_contexts(
referenced_interface_names, for_conversion)
@@ -626,7 +623,7 @@
templates_dir = argv[2]
dummy_filename = argv[3]
except IndexError:
- print('Usage: %s CACHE_DIR TEMPLATES_DIR DUMMY_FILENAME' % argv[0])
+ print(f'Usage: {argv[0]} CACHE_DIR TEMPLATES_DIR DUMMY_FILENAME')
return 1
# Delete all jinja2 .cache files, since they will get regenerated anyways.
@@ -659,7 +656,7 @@
# Create a dummy file as output for the build system,
# since filenames of individual cache files are unpredictable and opaque
# (they are hashes of the template path, which varies based on environment)
- with open(dummy_filename, 'w') as dummy_file:
+ with open(dummy_filename, 'w', encoding='utf-8') as dummy_file:
pass # |open| creates or touches the file
diff --git a/cobalt/bindings/contexts.py b/cobalt/bindings/contexts.py
index 10b02da..7cdbea0 100644
--- a/cobalt/bindings/contexts.py
+++ b/cobalt/bindings/contexts.py
@@ -70,8 +70,8 @@
"""Mapping to cobalt value filtering for dictionary acceptable values."""
if is_any_type(idl_type) and not idl_literal.is_null:
raise ValueError('Unsupported default value in dictionary: '
- '\'%s %s = %s\'. Only null default is supported.' %
- (idl_type, name, idl_literal))
+ f"'{idl_type} {name} = {idl_literal}'. "
+ 'Only null default is supported.')
return idl_literal_to_cobalt_literal(idl_type, idl_literal)
@@ -207,7 +207,7 @@
flags.append('kConversionFlagObjectOnly')
if flags:
- return '(%s)' % ' | '.join(flags)
+ return f"({' | '.join(flags)})"
else:
return 'kNoConversionFlags'
@@ -233,7 +233,7 @@
not idl_type.is_callback_interface), 'Callback types not supported.'
element_cobalt_type = self.idl_type_to_cobalt_type(
self.resolve_typedef(element_idl_type))
- return '::cobalt::script::Sequence< %s >' % element_cobalt_type
+ return f'::cobalt::script::Sequence< {element_cobalt_type} >'
def idl_promise_type_to_cobalt(self, idl_type):
"""Map IDL promise type to C++ promise type implementation."""
@@ -244,8 +244,8 @@
not idl_type.is_callback_interface), 'Callback types not supported.'
element_cobalt_type = self.idl_type_to_cobalt_type(
self.resolve_typedef(result_idl_type))
- result = 'std::unique_ptr<::cobalt::script::Promise< %s* > >' % (
- element_cobalt_type)
+ result = ('std::unique_ptr<::cobalt::script::Promise'
+ f'< {element_cobalt_type}* > >')
return result
def idl_union_type_to_cobalt(self, idl_type):
@@ -269,11 +269,11 @@
# Some member types need to be wrapped with ScriptValue::Handle.
if is_any_type(flattened_type) or is_array_buffer_or_view_type(
flattened_type):
- cobalt_type = '::cobalt::script::Handle<{}>'.format(cobalt_type)
+ cobalt_type = f'::cobalt::script::Handle<{cobalt_type}>'
cobalt_types.append(cobalt_type)
- return '::cobalt::script::UnionType%d<%s >' % (len(cobalt_types),
- ', '.join(cobalt_types))
+ return (f'::cobalt::script::UnionType{len(cobalt_types)}<'
+ f"{', '.join(cobalt_types)} >")
def get_implemented_interface_name(self, idl_type):
interface_name = get_interface_name(idl_type)
@@ -291,11 +291,11 @@
elif idl_type.is_string_type:
cobalt_type = idl_string_type_to_cobalt(idl_type)
elif idl_type.is_callback_interface:
- cobalt_type = '::cobalt::script::CallbackInterfaceTraits<%s >' % (
- self.get_implemented_interface_name(idl_type))
+ cobalt_type = ('::cobalt::script::CallbackInterfaceTraits<'
+ f'{self.get_implemented_interface_name(idl_type)} >')
elif idl_type.is_interface_type:
- cobalt_type = 'scoped_refptr<%s>' % self.get_implemented_interface_name(
- idl_type)
+ cobalt_type = ('scoped_refptr<'
+ f'{self.get_implemented_interface_name(idl_type)}>')
elif idl_type.is_union_type:
cobalt_type = self.idl_union_type_to_cobalt(idl_type)
elif idl_type.is_enum:
@@ -315,11 +315,11 @@
elif is_promise_type(idl_type):
cobalt_type = self.idl_promise_type_to_cobalt(idl_type)
elif is_array_buffer_or_view_type(idl_type):
- cobalt_type = '::cobalt::script::{}'.format(idl_type.base_type)
- assert cobalt_type, 'Unsupported idl_type %s' % idl_type
+ cobalt_type = f'::cobalt::script::{idl_type.base_type}'
+ assert cobalt_type, f'Unsupported idl_type {idl_type}'
if cobalt_type_is_optional(idl_type):
- cobalt_type = 'base::Optional<%s >' % cobalt_type
+ cobalt_type = f'base::Optional<{cobalt_type} >'
return cobalt_type
@@ -332,7 +332,7 @@
else:
cobalt_type = self.idl_type_to_cobalt_type(idl_type)
if getattr(typed_object, 'is_variadic', False):
- cobalt_type = 'std::vector<%s>' % cobalt_type
+ cobalt_type = f'std::vector<{cobalt_type}>'
return cobalt_type
def typed_object_to_arg_type(self, interface, typed_object):
@@ -344,11 +344,11 @@
idl_type.is_callback_interface):
return base_type + '*'
if is_any_type(idl_type) or is_array_buffer_or_view_type(idl_type):
- return 'const ::cobalt::script::ScriptValue<%s>*' % base_type
+ return f'const ::cobalt::script::ScriptValue<{base_type}>*'
elif cobalt_type_is_optional(idl_type) or is_sequence_type(idl_type) or (
idl_type.is_string_type or idl_type.is_interface_type or
idl_type.is_union_type):
- return 'const %s&' % base_type
+ return f'const {base_type}&'
return base_type
def argument_context(self, interface, argument):
@@ -466,10 +466,10 @@
if operation.name:
cobalt_name = capitalize_function_name(operation.name)
elif is_indexed:
- cobalt_name = 'AnonymousIndexed%s' % function_suffix[special_type]
+ cobalt_name = f'AnonymousIndexed{function_suffix[special_type]}'
else:
assert is_named
- cobalt_name = 'AnonymousNamed%s' % function_suffix[special_type]
+ cobalt_name = f'AnonymousNamed{function_suffix[special_type]}'
context = {
'name': cobalt_name,
diff --git a/cobalt/bindings/flatten_idls.py b/cobalt/bindings/flatten_idls.py
index 2d463a1..c5e5faa 100644
--- a/cobalt/bindings/flatten_idls.py
+++ b/cobalt/bindings/flatten_idls.py
@@ -116,7 +116,7 @@
"""
flattened_interfaces = list(flattened_interfaces)
assert all((isinstance(i, cls) for i in flattened_interfaces))
- with open(output_file, 'w') as f:
+ with open(output_file, 'w', encoding='utf-8') as f:
pickle.dump(flattened_interfaces, f)
@classmethod
@@ -128,7 +128,7 @@
Returns:
A list of FlattenedInterface objects.
"""
- with open(pickle_file) as f:
+ with open(pickle_file, encoding='utf-8') as f:
unpickled_list = pickle.load(f)
assert all((isinstance(item, cls) for item in unpickled_list))
return unpickled_list
@@ -153,7 +153,7 @@
"""Parse the list of idl_files and return a list of FlattenedInterfaces."""
# Import idl_reader here rather than at the beginning of the file because the
# module import path will be set based on an argument to this script.
- import idl_reader # pylint: disable=g-import-not-at-top
+ import idl_reader # pylint: disable=import-outside-toplevel
# Create an IdlReader that will parse the IDL and return the internal
# representation of the interface.
@@ -187,7 +187,7 @@
partial_interfaces[name].append(interface)
else:
assert name not in interfaces, ('Multiple IDL files found for '
- 'interface: %s') % name
+ f'interface: {name}')
interfaces[name] = interface
# Merge partial interfaces into their main interface.
@@ -243,13 +243,13 @@
parser.add_argument(
'--blink_scripts_dir',
required=True,
- help='Specify the directory from which blink\'s scripts should be imported.'
- )
+ help=('Specify the directory from which blink\'s scripts should be '
+ 'imported.'))
options = parser.parse_args(argv)
if not os.path.isdir(options.blink_scripts_dir):
- raise RuntimeError('%s is not a directory' % options.blink_scripts_dir)
+ raise RuntimeError(f'{options.blink_scripts_dir} is not a directory')
# Set the script directory dynamically. This ensures that Blink's IDLs will
# be loaded using Blink's IDL parsing scripts (and Cobalt IDLs will be loaded
diff --git a/cobalt/bindings/idl_compiler_cobalt.py b/cobalt/bindings/idl_compiler_cobalt.py
index da34fe7..6b8c09d 100644
--- a/cobalt/bindings/idl_compiler_cobalt.py
+++ b/cobalt/bindings/idl_compiler_cobalt.py
@@ -52,9 +52,8 @@
if options.output_directory is None:
parser.error('Must specify output directory using --output-directory.')
if len(args) != 1:
- parser.error(
- 'Must specify exactly 1 input file as argument, but %d given.' %
- len(args))
+ parser.error('Must specify exactly 1 input file as argument, '
+ f'but {len(args)} given.')
idl_filename = os.path.realpath(args[0])
return options, idl_filename
diff --git a/cobalt/bindings/name_conversion.py b/cobalt/bindings/name_conversion.py
index 00673c8..17c17d5 100644
--- a/cobalt/bindings/name_conversion.py
+++ b/cobalt/bindings/name_conversion.py
@@ -36,7 +36,7 @@
# Regular expression to capture all of the special tokens.
special_token_re = re.compile(
- '(%s)' % '|'.join(special_token_list), flags=re.IGNORECASE)
+ f"({'|'.join(special_token_list)})", flags=re.IGNORECASE)
# Split tokens on non-alphanumeric characters (excluding underscores).
enumeration_value_word_delimeter_re = re.compile(r'[^a-zA-Z0-9]')
@@ -46,8 +46,8 @@
cobalt_name = titlecase_word_delimiter_re.sub('_', class_name).lower()
for term in word_list:
replacement = [
- token for token in re.split('_?(%s)_?' %
- term, cobalt_name, re.IGNORECASE) if token
+ token for token in re.split(f'_?({term})_?', cobalt_name, re.IGNORECASE)
+ if token
]
cobalt_name = '_'.join(replacement)
return cobalt_name
@@ -79,9 +79,9 @@
def convert_to_cobalt_enumeration_value(enum_type, enum_value):
- return 'k%s%s' % (enum_type, ''.join(
- (token.capitalize()
- for token in enumeration_value_word_delimeter_re.split(enum_value))))
+ tokens = enumeration_value_word_delimeter_re.split(enum_value)
+ capitalized_tokens_joined = ''.join(token.capitalize() for token in tokens)
+ return f'k{enum_type}{capitalized_tokens_joined}'
def get_interface_name(idl_type):
diff --git a/cobalt/bindings/overload_context.py b/cobalt/bindings/overload_context.py
index e44d989..ae3faaa 100644
--- a/cobalt/bindings/overload_context.py
+++ b/cobalt/bindings/overload_context.py
@@ -51,7 +51,7 @@
effective_overload_set_by_length(overloaded_methods))
overload_context['length'] = min(
- [length for length, _ in effective_overloads_by_length])
+ length for length, _ in effective_overloads_by_length)
# The first four steps of the overload resolution algorithm involve
# removing entries from the set S of all overloads where the number
diff --git a/cobalt/bindings/path_generator.py b/cobalt/bindings/path_generator.py
index ee91538..9041278 100644
--- a/cobalt/bindings/path_generator.py
+++ b/cobalt/bindings/path_generator.py
@@ -40,7 +40,7 @@
@property
def generated_conversion_header_path(self):
return os.path.join(self.generated_root,
- '%s_gen_type_conversion.h' % self.engine_prefix)
+ f'{self.engine_prefix}_gen_type_conversion.h')
@property
def generated_conversion_include_path(self):
@@ -59,7 +59,7 @@
enum_info = self.info_provider.enumerations[interface_name]
idl_path = enum_info['full_path']
else:
- raise KeyError('Unknown interface name %s' % interface_name)
+ raise KeyError(f'Unknown interface name {interface_name}')
rel_idl_path = os.path.relpath(idl_path, self.interfaces_root)
components = os.path.dirname(rel_idl_path).split(os.sep)
@@ -75,8 +75,7 @@
def FullBindingsClassName(self, impl_name, interface_name):
"""Get the fully qualified name of the generated bindings class."""
- return '%s::%s' % (self.Namespace(interface_name),
- self.BindingsClass(impl_name))
+ return f'{self.Namespace(interface_name)}::{self.BindingsClass(impl_name)}'
def FullClassName(self, impl_name, interface_name=None):
"""Get the fully qualified name of the implementation class."""
@@ -107,7 +106,7 @@
interface_info['full_path'],
forward_slashes=True,
output_directory=self.generated_root,
- output_prefix='%s_' % self.engine_prefix,
+ output_prefix=f'{self.engine_prefix}_',
output_extension='h',
base_directory=os.path.dirname(self.interfaces_root))
@@ -118,7 +117,7 @@
interface_info['full_path'],
forward_slashes=True,
output_directory=self.generated_root,
- output_prefix='%s_' % self.engine_prefix,
+ output_prefix=f'{self.engine_prefix}_',
output_extension='cc',
base_directory=os.path.dirname(self.interfaces_root))
@@ -144,7 +143,7 @@
interface_info['full_path'],
forward_slashes=True,
output_directory=self.generated_root,
- output_prefix='%s_' % self.engine_prefix,
+ output_prefix=f'{self.engine_prefix}_',
output_extension='cc',
base_directory=os.path.dirname(self.interfaces_root))
@@ -170,6 +169,6 @@
interface_info['full_path'],
forward_slashes=True,
output_directory=self.generated_root,
- output_prefix='%s_' % self.engine_prefix,
+ output_prefix=f'{self.engine_prefix}_',
output_extension='cc',
base_directory=os.path.dirname(self.interfaces_root))
diff --git a/cobalt/bindings/testing/bindings_sandbox_main.cc b/cobalt/bindings/testing/bindings_sandbox_main.cc
index 83adb20..3e57a22 100644
--- a/cobalt/bindings/testing/bindings_sandbox_main.cc
+++ b/cobalt/bindings/testing/bindings_sandbox_main.cc
@@ -15,6 +15,7 @@
#include <iostream>
#include <string>
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/wrap_main.h"
#include "cobalt/bindings/testing/window.h"
#include "cobalt/script/javascript_engine.h"
@@ -35,7 +36,7 @@
DCHECK(!g_javascript_runner);
g_javascript_runner = new cobalt::script::StandaloneJavascriptRunner(
- base::MessageLoop::current()->task_runner(), javascript_engine_options,
+ base::ThreadTaskRunnerHandle::Get(), javascript_engine_options,
test_window);
DCHECK(g_javascript_runner);
g_javascript_runner->RunUntilDone(quit_closure);
diff --git a/cobalt/bindings/update_blink_idls.py b/cobalt/bindings/update_blink_idls.py
index 5bba317..823d360 100644
--- a/cobalt/bindings/update_blink_idls.py
+++ b/cobalt/bindings/update_blink_idls.py
@@ -15,7 +15,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
"""Generate IDL files for interfaces that are not implemented in Cobalt.
This will do a shallow clone of the chromium repository and gather the set of
@@ -59,8 +58,10 @@
branch: Name of a branch in Chromium's git repository.
destination_dir: Directory into which Chromium repository will be cloned.
"""
- clone_command = ['git', 'clone', '--depth', '1', '--branch', branch,
- _CHROMIUM_REPOSITORY_URL, '.']
+ clone_command = [
+ 'git', 'clone', '--depth', '1', '--branch', branch,
+ _CHROMIUM_REPOSITORY_URL, '.'
+ ]
subprocess.check_call(clone_command, cwd=destination_dir)
@@ -80,7 +81,7 @@
output_dir: Directory into which the generated IDL file will be written.
"""
output_idl_filename = os.path.join(output_dir, interface_name) + '.idl'
- with open(output_idl_filename, 'w') as f:
+ with open(output_idl_filename, 'w', encoding='utf-8') as f:
f.write(_UNIMPLEMENTED_INTERFACE_TEMPLATE.format(interface_name))
@@ -95,15 +96,16 @@
"""
output_idl_filename = os.path.join(output_dir,
interface.name) + '_unsupported.idl'
- with open(output_idl_filename, 'w') as f:
- f.write('partial interface %s {\n' % interface.name)
+ with open(output_idl_filename, 'w', encoding='utf-8') as f:
+ f.write(f'partial interface {interface.name} '
+ '{\n')
for c in interface.constants:
# Type doesn't matter so use long
- f.write(' [NotSupported] const long %s;\n' % c)
+ f.write(f' [NotSupported] const long {c};\n')
for a in interface.attributes:
- f.write(' [NotSupported] attribute long %s;\n' % a)
+ f.write(f' [NotSupported] attribute long {a};\n')
for o in interface.operations:
- f.write(' [NotSupported] void %s();\n' % o)
+ f.write(f' [NotSupported] void {o}();\n')
f.write('}\n')
@@ -121,16 +123,16 @@
help='Directory containing a chromium repository. If the --branch '
'argument is set, the directory will be clobbered and the specified '
'branch will be cloned into this directory.')
- parser.add_argument('--output_dir',
- required=True,
- help='Directory into which IDL files will be placed. '
- 'The current contents will be clobbered.')
+ parser.add_argument(
+ '--output_dir',
+ required=True,
+ help='Directory into which IDL files will be placed. '
+ 'The current contents will be clobbered.')
options = parser.parse_args(argv)
logging_format = '%(asctime)s %(levelname)-8s %(message)s'
- logging.basicConfig(level=logging.INFO,
- format=logging_format,
- datefmt='%m-%d %H:%M')
+ logging.basicConfig(
+ level=logging.INFO, format=logging_format, datefmt='%m-%d %H:%M')
temp_dir = tempfile.mkdtemp()
@@ -153,25 +155,28 @@
# Gather the blink IDLs
logging.info('Gathering blink IDLs.')
blink_pickle_file = os.path.join(temp_dir, 'blink_idl.pickle')
- subprocess.check_call(
- ['python', 'flatten_idls.py', '--directory', os.path.join(
- chromium_dir, 'third_party/WebKit/Source/core'), '--directory',
- os.path.join(chromium_dir, 'third_party/WebKit/Source/modules'),
- '--ignore', '*/InspectorInstrumentation.idl', '--blink_scripts_dir',
- os.path.join(chromium_dir,
- 'third_party/WebKit/Source/bindings/scripts'),
- '--output_path', blink_pickle_file])
+ subprocess.check_call([
+ 'python', 'flatten_idls.py', '--directory',
+ os.path.join(chromium_dir,
+ 'third_party/WebKit/Source/core'), '--directory',
+ os.path.join(chromium_dir, 'third_party/WebKit/Source/modules'),
+ '--ignore', '*/InspectorInstrumentation.idl', '--blink_scripts_dir',
+ os.path.join(chromium_dir,
+ 'third_party/WebKit/Source/bindings/scripts'),
+ '--output_path', blink_pickle_file
+ ])
# Gather Cobalt's IDLs
logging.info('Gathering Cobalt IDLs.')
cobalt_root = os.path.join(_SCRIPT_DIR, '../../../')
cobalt_pickle_file = os.path.join(temp_dir, 'cobalt_idl.pickle')
- subprocess.check_call(
- ['python', 'flatten_idls.py', '--directory', os.path.join(
- cobalt_root, 'cobalt'), '--ignore', '*/cobalt/bindings/*',
- '--blink_scripts_dir', os.path.join(
- cobalt_root, 'third_party/blink/Source/bindings/scripts'),
- '--output_path', cobalt_pickle_file])
+ subprocess.check_call([
+ 'python', 'flatten_idls.py', '--directory',
+ os.path.join(cobalt_root, 'cobalt'), '--ignore', '*/cobalt/bindings/*',
+ '--blink_scripts_dir',
+ os.path.join(cobalt_root, 'third_party/blink/Source/bindings/scripts'),
+ '--output_path', cobalt_pickle_file
+ ])
# Unpickle the files.
blink_interfaces = _LoadInterfaces(blink_pickle_file)
diff --git a/cobalt/bindings/v8c/_env.py b/cobalt/bindings/v8c/_env.py
index 332eabc..ea1f9b1 100644
--- a/cobalt/bindings/v8c/_env.py
+++ b/cobalt/bindings/v8c/_env.py
@@ -21,6 +21,6 @@
_ENV = path.abspath(path.join(path.dirname(__file__), path.pardir, '_env.py'))
if not path.exists(_ENV):
- print('%s: Can\'t find repo root.\nMissing parent: %s' % (__file__, _ENV))
+ print(f'{__file__}: Can\'t find repo root.\nMissing parent: {_ENV}')
sys.exit(1)
load_source('', _ENV)
diff --git a/cobalt/bindings/v8c/code_generator_v8c.py b/cobalt/bindings/v8c/code_generator_v8c.py
index 3e11cf6..4c566eb 100644
--- a/cobalt/bindings/v8c/code_generator_v8c.py
+++ b/cobalt/bindings/v8c/code_generator_v8c.py
@@ -26,22 +26,22 @@
"""Implementation of ExpressionGenerator for V8."""
def is_undefined(self, arg):
- return '{}->IsUndefined()'.format(arg)
+ return f'{arg}->IsUndefined()'
def is_undefined_or_null(self, arg):
- return '{}->IsNullOrUndefined()'.format(arg)
+ return f'{arg}->IsNullOrUndefined()'
def inherits_interface(self, interface_name, arg):
- return ('{}->IsObject() ? '
+ return (f'{arg}->IsObject() ? '
'wrapper_factory->DoesObjectImplementInterface(object, '
- 'base::GetTypeId<{}>()) : false').format(arg, interface_name)
+ f'base::GetTypeId<{interface_name}>()) : false')
def is_number(self, arg):
- return '{}->IsNumber()'.format(arg)
+ return f'{arg}->IsNumber()'
def is_type(self, interface_name, arg):
- return ('{}->IsObject() ? '
- 'object->Is{}(): false').format(arg, interface_name)
+ return (f'{arg}->IsObject() ? '
+ f'object->Is{interface_name}(): false')
class CodeGeneratorV8c(CodeGeneratorCobalt):
@@ -52,7 +52,7 @@
def __init__(self, *args, **kwargs):
module_path, _ = os.path.split(os.path.realpath(__file__))
templates_dir = os.path.normpath(os.path.join(module_path, 'templates'))
- super(CodeGeneratorV8c, self).__init__(templates_dir, *args, **kwargs)
+ super().__init__(templates_dir, *args, **kwargs)
def build_interface_context(self, interface, interface_info, definitions):
# Due to a V8 internals quirks, named constructor attributes MUST come
@@ -60,13 +60,14 @@
# as the "name" property on the function template value, overriding the
# originally selected name from |FunctionTemplate::SetClassName|. Efforts
# to document/modify this behavior in V8 are underway.
- context = super(CodeGeneratorV8c, self).build_interface_context(
- interface, interface_info, definitions)
+ context = super().build_interface_context(interface, interface_info,
+ definitions)
context['all_attributes_v8_order_quirk'] = sorted(
- [a for a in context['attributes'] + context['static_attributes']],
+ context['attributes'] + context['static_attributes'],
key=lambda a: not a.get('is_named_constructor_attribute', False))
return context
+ # pylint: disable=invalid-overridden-method
@property
def generated_file_prefix(self):
return 'v8c'
@@ -74,3 +75,5 @@
@property
def expression_generator(self):
return CodeGeneratorV8c._expression_generator
+
+ # pylint: enable=invalid-overridden-method
diff --git a/cobalt/black_box_tests/black_box_cobalt_runner.py b/cobalt/black_box_tests/black_box_cobalt_runner.py
index 1f333e3..9e83f09 100644
--- a/cobalt/black_box_tests/black_box_cobalt_runner.py
+++ b/cobalt/black_box_tests/black_box_cobalt_runner.py
@@ -48,7 +48,9 @@
url,
log_file=None,
target_params=None,
- success_message=None):
+ success_message=None,
+ poll_until_wait_seconds=POLL_UNTIL_WAIT_SECONDS,
+ **kwargs):
# For black box tests, don't log inline script warnings, we intend to
# explicitly control timings for suspends and resumes, so we are not
# concerned about a "suspend at the wrong time".
@@ -56,8 +58,16 @@
target_params = []
target_params.append('--silence_inline_script_warnings')
- super().__init__(launcher_params, url, log_file, target_params,
- success_message)
+ super().__init__(
+ launcher_params,
+ url,
+ log_file,
+ target_params,
+ success_message,
+ poll_until_wait_seconds=poll_until_wait_seconds,
+ **kwargs)
+
+ self.poll_until_wait_seconds = poll_until_wait_seconds
def PollUntilFoundOrTestsFailedWithReconnects(self, css_selector):
"""Polls until an element is found.
@@ -66,7 +76,7 @@
css_selector: A CSS selector
"""
start_time = time.time()
- while time.time() - start_time < POLL_UNTIL_WAIT_SECONDS:
+ while time.time() - start_time < self.poll_until_wait_seconds:
is_failed = False
try:
if self.FindElements(css_selector):
@@ -76,7 +86,8 @@
selenium_exceptions.NoSuchElementException,
selenium_exceptions.NoSuchWindowException,
selenium_exceptions.WebDriverException) as e:
- # If the page
+ # If the page is reloaded, the webdriver client status becomes
+ # stale and should be reconnected.
logging.warning(e)
self.ReconnectWebDriver()
continue
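A minimal usage sketch of the new keyword, assuming a hypothetical test built on BlackBoxTestCase (the URL and CSS selector are illustrative); the larger window is forwarded through CreateCobaltRunner to BlackBoxCobaltRunner and consumed by PollUntilFoundOrTestsFailedWithReconnects:

    # Hypothetical test method; only the runner APIs shown in this patch are used.
    def test_slow_page(self):
      with self.CreateCobaltRunner(
          url='https://example.test/slow.html',  # illustrative URL
          poll_until_wait_seconds=100) as runner:
        # Retries (reconnecting the webdriver after page reloads) for up to
        # 100 seconds instead of the default POLL_UNTIL_WAIT_SECONDS.
        runner.PollUntilFoundOrTestsFailedWithReconnects('#ready')
        self.assertTrue(runner.JSTestsSucceeded())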
diff --git a/cobalt/black_box_tests/black_box_tests.py b/cobalt/black_box_tests/black_box_tests.py
old mode 100644
new mode 100755
index 079b829..d8cea51
--- a/cobalt/black_box_tests/black_box_tests.py
+++ b/cobalt/black_box_tests/black_box_tests.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# Copyright 2017 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,6 +42,15 @@
'raspi-0/devel',
]
+_EVERGREEN_COMPATIBLE_CONFIGS = [
+ # TODO(b/283788059): enable when there are GitHub jobs to run integration
+ # and Black Box Tests on evergreen-arm-hardfp.
+ #'evergreen-arm/devel',
+ # TODO(b/283144901): enable when the Starboard 16 binaries are released for
+ # Evergreen.
+ #'evergreen-x64/devel',
+]
+
_PORT_SELECTION_RETRY_LIMIT = 10
_PORT_SELECTION_RANGE = [5000, 7000]
# List of blocked ports.
@@ -81,23 +91,34 @@
'service_worker_persist_test',
'soft_mic_platform_service_test',
'text_encoding_test',
+ 'wasm_basic_test',
'web_debugger',
- 'web_platform_tests',
'web_worker_test',
'worker_csp_test',
'worker_load_test',
]
+# These tests are very different and require a custom config and a proxy.
+_WPT_TESTS = [
+ 'web_platform_tests',
+]
# These tests can only be run on platforms whose app launcher can send deep
# links.
_TESTS_NEEDING_DEEP_LINK = [
'deep_links',
]
+# These tests can only run on Evergreen-compatible platforms.
+_TESTS_EVERGREEN_END_TO_END = [
+ 'evergreen_verify_qa_channel_update_test',
+]
# Location of test files.
_TEST_DIR_PATH = 'cobalt.black_box_tests.tests.'
+
+_LAUNCH_TARGET = 'cobalt'
+
# Platform configuration and device information parameters.
_launcher_params = None
# Binding address used to create the test server.
-_binding_address = None
+_server_binding_address = None
# Port used to create the web platform test http server.
_wpt_http_port = None
@@ -115,6 +136,7 @@
@classmethod
def setUpClass(cls):
super(BlackBoxTestCase, cls).setUpClass()
+ logging.info('\n\n\n%s\n\n', '=' * 40)
logging.info('Running %s', cls.__name__)
@classmethod
@@ -122,27 +144,28 @@
super(BlackBoxTestCase, cls).tearDownClass()
logging.info('Done %s', cls.__name__)
- def CreateCobaltRunner(self, url=None, target_params=None):
+ def CreateCobaltRunner(self, url=None, target_params=None, **kwargs):
all_target_params = list(target_params) if target_params else []
if _launcher_params.target_params is not None:
all_target_params += _launcher_params.target_params
- new_runner = black_box_cobalt_runner.BlackBoxCobaltRunner(
+
+ return black_box_cobalt_runner.BlackBoxCobaltRunner(
launcher_params=_launcher_params,
url=url,
- target_params=all_target_params)
- return new_runner
+ target_params=all_target_params,
+ **kwargs)
def GetBindingAddress(self):
- return _binding_address
+ return _server_binding_address
def GetWptHttpPort(self):
return _wpt_http_port
-def LoadTests(launcher_params):
+def LoadTests(launcher_params, test_set):
launcher = abstract_launcher.LauncherFactory(
launcher_params.platform,
- 'cobalt',
+ _LAUNCH_TARGET,
launcher_params.config,
device_id=launcher_params.device_id,
target_params=None,
@@ -152,13 +175,44 @@
loader_config=launcher_params.loader_config,
loader_out_directory=launcher_params.loader_out_directory)
- test_targets = _TESTS_NO_SIGNAL
+ test_targets = []
- if launcher.SupportsSuspendResume():
- test_targets += _TESTS_NEEDING_SYSTEM_SIGNAL
+ if test_set in ['all', 'blackbox']:
+ test_targets = _TESTS_NO_SIGNAL
- if launcher.SupportsDeepLink():
- test_targets += _TESTS_NEEDING_DEEP_LINK
+ if launcher.SupportsSuspendResume():
+ test_targets += _TESTS_NEEDING_SYSTEM_SIGNAL
+
+ if launcher.SupportsDeepLink():
+ test_targets += _TESTS_NEEDING_DEEP_LINK
+
+ if test_set in ['all', 'wpt']:
+ test_targets += _WPT_TESTS
+
+ test_suite = unittest.TestSuite()
+ for test in test_targets:
+ test_suite.addTest(unittest.TestLoader().loadTestsFromModule(
+ importlib.import_module(_TEST_DIR_PATH + test)))
+ return test_suite
+
+
+def LoadEvergreenEndToEndTests(launcher_params):
+ launcher = abstract_launcher.LauncherFactory( # pylint: disable=unused-variable
+ launcher_params.platform,
+ _LAUNCH_TARGET,
+ launcher_params.config,
+ device_id=launcher_params.device_id,
+ target_params=None,
+ output_file=None,
+ out_directory=launcher_params.out_directory,
+ loader_platform=launcher_params.loader_platform,
+ loader_config=launcher_params.loader_config,
+ loader_out_directory=launcher_params.loader_out_directory,
+ # The more lightweight elf_loader_sandbox can't be used since it has no
+ # knowledge of updates or installations.
+ loader_target='loader_app')
+
+ test_targets = _TESTS_EVERGREEN_END_TO_END
test_suite = unittest.TestSuite()
for test in test_targets:
@@ -170,50 +224,49 @@
class BlackBoxTests(object):
"""Helper class to run all black box tests and return results."""
- def __init__(self,
- server_binding_address,
- proxy_address=None,
- proxy_port=None,
- test_name=None,
- wpt_http_port=None,
- device_ips=None,
- device_id=None):
+ def __init__(self, args):
+
+ self.args = args
+
+ # TODO(b/137905502): These globals should be refactored.
# Setup global variables used by test cases.
global _launcher_params
_launcher_params = command_line.CreateLauncherParams()
# Keep other modules from seeing these args.
sys.argv = sys.argv[:1]
- global _binding_address
- _binding_address = server_binding_address
+ global _server_binding_address
+ _server_binding_address = args.server_binding_address
+
# Port used to create the web platform test http server. If not specified,
# a random free port is used.
- if wpt_http_port is None:
- wpt_http_port = str(self.GetUnusedPort([server_binding_address]))
global _wpt_http_port
- _wpt_http_port = wpt_http_port
+ _wpt_http_port = args.wpt_http_port or str(
+ self.GetUnusedPort([_server_binding_address]))
+
+ # Proxy is only needed for WPT
+ self.use_proxy = args.test_set in ['all', 'wpt']
+
# TODO: Remove generation of --dev_servers_listen_ip once executable will
# be able to bind correctly with incomplete support of IPv6
- if device_id and IsValidIpAddress(device_id):
+ if args.device_id and IsValidIpAddress(args.device_id):
_launcher_params.target_params.append(
- f'--dev_servers_listen_ip={device_id}')
- elif IsValidIpAddress(server_binding_address):
+ f'--dev_servers_listen_ip={args.device_id}')
+ elif IsValidIpAddress(_server_binding_address):
_launcher_params.target_params.append(
- f'--dev_servers_listen_ip={server_binding_address}')
+ f'--dev_servers_listen_ip={_server_binding_address}')
_launcher_params.target_params.append(
- f'--web-platform-test-server=http://web-platform.test:{wpt_http_port}')
+ f'--web-platform-test-server=http://web-platform.test:{_wpt_http_port}')
# Port used to create the proxy server. If not specified, a random free
# port is used.
- if proxy_port is None:
- proxy_port = str(self.GetUnusedPort([server_binding_address]))
- if proxy_address is None:
- proxy_address = server_binding_address
- _launcher_params.target_params.append(
- f'--proxy={proxy_address}:{proxy_port}')
+ if self.use_proxy:
+ self.proxy_port = args.proxy_port or str(
+ self.GetUnusedPort([_server_binding_address]))
+ proxy_address = args.proxy_address or _server_binding_address
+ _launcher_params.target_params.append(
+ f'--proxy={proxy_address}:{self.proxy_port}')
- self.proxy_port = proxy_port
- self.test_name = test_name
- self.device_ips = device_ips
+ self.device_ips = args.device_ips
# Test domains used in web platform tests to be resolved to the server
# binding address.
@@ -222,33 +275,61 @@
'www2.web-platform.test', 'xn--n8j6ds53lwwkrqhv28a.web-platform.test',
'xn--lve-6lad.web-platform.test'
]
- self.host_resolve_map = {host: server_binding_address for host in hosts}
+ self.host_resolve_map = {host: _server_binding_address for host in hosts}
def Run(self):
- if self.proxy_port == '-1':
+ if self.use_proxy and self.proxy_port == '-1':
return 1
- if (f'{_launcher_params.platform}/{_launcher_params.config}'
- in _DISABLED_BLACKBOXTEST_CONFIGS):
- logging.warning('Blackbox tests disabled for platform:%s config:%s',
- _launcher_params.platform, _launcher_params.config)
+ run_cobalt_tests = True
+ run_evergreen_tests = False
+ launch_config = f'{_launcher_params.platform}/{_launcher_params.config}'
+ # TODO(b/135549281): Configuring this in Python is superfluous; the on/off
+ # flags can live in GitHub Actions code.
+ if launch_config in _DISABLED_BLACKBOXTEST_CONFIGS:
+ run_cobalt_tests = False
+ logging.warning(
+ 'Cobalt blackbox tests disabled for platform:%s config:%s',
+ _launcher_params.platform, _launcher_params.config)
+
+ if launch_config in _EVERGREEN_COMPATIBLE_CONFIGS:
+ run_evergreen_tests = self.args.test_set in ['all', 'evergreen']
+
+ if not (run_cobalt_tests or run_evergreen_tests):
return 0
- logging.info('Using proxy port: %s', self.proxy_port)
-
- with ProxyServer(
- port=self.proxy_port,
- host_resolve_map=self.host_resolve_map,
- client_ips=self.device_ips):
- if self.test_name:
+ def LoadAndRunTests():
+ if self.args.test_name:
suite = unittest.TestLoader().loadTestsFromName(_TEST_DIR_PATH +
- self.test_name)
+ self.args.test_name)
+ return_code = not unittest.TextTestRunner(
+ verbosity=2, stream=sys.stdout).run(suite).wasSuccessful()
+ return return_code
else:
- suite = LoadTests(_launcher_params)
- # Using verbosity=2 to log individual test function names and results.
- return_code = not unittest.TextTestRunner(
- verbosity=2, stream=sys.stdout).run(suite).wasSuccessful()
- return return_code
+ cobalt_tests_return_code = 0
+ if run_cobalt_tests:
+ suite = LoadTests(_launcher_params, self.args.test_set)
+ # Using verbosity=2 to log individual test function names and results.
+ cobalt_tests_return_code = not unittest.TextTestRunner(
+ verbosity=2, stream=sys.stdout).run(suite).wasSuccessful()
+
+ evergreen_tests_return_code = 0
+ if run_evergreen_tests:
+ suite = LoadEvergreenEndToEndTests(_launcher_params)
+ evergreen_tests_return_code = not unittest.TextTestRunner(
+ verbosity=2, stream=sys.stdout).run(suite).wasSuccessful()
+
+ return cobalt_tests_return_code or evergreen_tests_return_code
+
+ if self.use_proxy:
+ logging.info('Using proxy port: %s', self.proxy_port)
+ with ProxyServer(
+ port=self.proxy_port,
+ host_resolve_map=self.host_resolve_map,
+ client_ips=self.args.device_ips):
+ return LoadAndRunTests()
+ else:
+ return LoadAndRunTests()
def GetUnusedPort(self, addresses):
"""Find a free port on the list of addresses by pinging with sockets."""
@@ -346,14 +427,15 @@
nargs='*',
help=('IPs of test devices that will be allowed to connect. If not '
'specified, all IPs will be allowed to connect.'))
+ parser.add_argument(
+ '--test_set',
+ choices=['all', 'wpt', 'blackbox', 'evergreen'],
+ default='all')
args, _ = parser.parse_known_args()
log_level.InitializeLogging(args)
- test_object = BlackBoxTests(args.server_binding_address, args.proxy_address,
- args.proxy_port, args.test_name,
- args.wpt_http_port, args.device_ips,
- args.device_id)
+ test_object = BlackBoxTests(args)
sys.exit(test_object.Run())
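For clarity, a compact restatement (not additional behavior) of how the new --test_set choice maps onto the groups loaded by LoadTests and Run above:

    # Illustrative summary of the dispatch; launcher capability checks still apply.
    def targets_for(test_set, launcher):
      targets = []
      if test_set in ('all', 'blackbox'):
        targets += _TESTS_NO_SIGNAL
        if launcher.SupportsSuspendResume():
          targets += _TESTS_NEEDING_SYSTEM_SIGNAL
        if launcher.SupportsDeepLink():
          targets += _TESTS_NEEDING_DEEP_LINK
      if test_set in ('all', 'wpt'):
        targets += _WPT_TESTS  # these also require the proxy server
      # 'all'/'evergreen' additionally run _TESTS_EVERGREEN_END_TO_END via
      # LoadEvergreenEndToEndTests() on Evergreen-compatible configs.
      return targets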
diff --git a/cobalt/black_box_tests/testdata/evergreen_test.html b/cobalt/black_box_tests/testdata/evergreen_test.html
new file mode 100644
index 0000000..b989250
--- /dev/null
+++ b/cobalt/black_box_tests/testdata/evergreen_test.html
@@ -0,0 +1,28 @@
+<!DOCTYPE html>
+<!--
+Copyright 2023 The Cobalt Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<html>
+
+<head>
+ <title>Cobalt Evergreen Test</title>
+ <script src='black_box_js_test_utils.js'></script>
+</head>
+
+<body>
+ <script src='evergreen_test_script.js'></script>
+</body>
+
+</html>
diff --git a/cobalt/black_box_tests/testdata/evergreen_test_script.js b/cobalt/black_box_tests/testdata/evergreen_test_script.js
new file mode 100644
index 0000000..cb03636
--- /dev/null
+++ b/cobalt/black_box_tests/testdata/evergreen_test_script.js
@@ -0,0 +1,101 @@
+// Copyright 2023 The Cobalt Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+var changeChannelResult = null;
+var waitForStatusResult = null;
+
+function changeChannel() {
+ const currentChannel = window.h5vcc.updater.getUpdaterChannel();
+ const status = window.h5vcc.updater.getUpdateStatus();
+
+ if (currentChannel == "" || status == "") {
+ // An update check hasn't happened yet.
+ return;
+ }
+
+ if (status != "App is up to date" &&
+ status != "Update installed, pending restart") {
+ // An update is in progress.
+ return;
+ }
+
+ if (currentChannel == "prod") {
+ window.h5vcc.updater.setUpdaterChannel(targetChannel);
+ console.log('The channel was changed to ' + targetChannel);
+ clearInterval(changeChannelResult);
+ waitForStatusResult = setInterval(waitForStatus, 500, targetStatus);
+ return;
+ }
+
+ if (currentChannel == targetChannel) {
+ clearInterval(changeChannelResult);
+ waitForStatusResult = setInterval(waitForStatus, 500, targetStatus);
+ return;
+ }
+}
+
+function waitForStatus() {
+ const currentStatus = window.h5vcc.updater.getUpdateStatus();
+
+ if (currentStatus == targetStatus) {
+ console.log('The expected status was found: ' + targetStatus);
+ assertTrue(true);
+ clearInterval(waitForStatusResult);
+ endTest();
+ return;
+ }
+
+ return;
+}
+
+function endTest() {
+ onEndTest();
+ setupFinished();
+}
+
+var resetInstallations = false;
+var encodedStatus = null;
+var targetStatus = null;
+var targetChannel = null;
+
+var query = window.location.search;
+
+if (query) {
+ // Splits each parameter into an array after removing the prepended "?".
+ query = query.slice(1).split("&");
+} else { query = []; } // No query string; keeps the forEach below safe.
+
+query.forEach(part => {
+ if (part.startsWith("resetInstallations=")) {
+ resetInstallations = (part.split("=")[1] === "true")
+ }
+
+ if (part.startsWith("status=")) {
+ encodedStatus = part.split("=")[1];
+ targetStatus = decodeURI(encodedStatus);
+ }
+
+ if (part.startsWith("channel=")) {
+ targetChannel = part.split("=")[1];
+ }
+});
+
+if (resetInstallations) {
+ window.h5vcc.updater.resetInstallations();
+ console.log('Installations have been reset');
+ assertTrue(true);
+ endTest();
+} else {
+ changeChannelResult = setInterval(changeChannel, 500);
+}
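The script above is driven entirely by URL query parameters (resetInstallations, channel, status). A small sketch of building such a URL with percent-encoded spaces, assuming Python's standard urllib (the test itself simply uses str.replace(' ', '%20')):

    from urllib.parse import quote

    # Illustrative helper; the real test constructs the URL inline.
    def evergreen_test_url(channel, status):
      return f'testdata/evergreen_test.html?channel={channel}&status={quote(status)}'

    print(evergreen_test_url('test', 'App is up to date'))
    # testdata/evergreen_test.html?channel=test&status=App%20is%20up%20to%20date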
diff --git a/cobalt/black_box_tests/testdata/wasm_basic_test.html b/cobalt/black_box_tests/testdata/wasm_basic_test.html
new file mode 100644
index 0000000..ccca569
--- /dev/null
+++ b/cobalt/black_box_tests/testdata/wasm_basic_test.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html>
+<!--
+ Copyright 2023 The Cobalt Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<head>
+ <title>Cobalt WASM Basic Test</title>
+ <script src='black_box_js_test_utils.js'></script>
+</head>
+
+<body>
+ <script>
+const fail = msg => {
+ if (msg) {
+ console.error(msg);
+ }
+ notReached();
+};
+const timeoutId = setTimeout(fail, 3000);
+const success = () => {
+ clearTimeout(timeoutId);
+ onEndTest();
+};
+
+const Module = {};
+
+const noop = () => {};
+const wasmImports = {
+ emscripten_notify_memory_growth: () => {
+ if (!Module.asm || !Module.asm.memory) {
+ return;
+ }
+ Module.HEAPU8 = new Uint8Array(Module.asm.memory.buffer);
+ },
+ fd_close: noop,
+ fd_seek: noop,
+ fd_write: noop,
+ proc_exit: noop,
+};
+const importObject = {
+ env: wasmImports,
+ wasi_snapshot_preview1: wasmImports,
+};
+
+const stringFromHeap = stringPointer => {
+ const heap = Module.HEAPU8;
+ let endPointer = stringPointer;
+ // Find null terminating character.
+ while (heap[endPointer]) ++endPointer;
+ return (new TextDecoder('utf8')).decode(heap.subarray(stringPointer, endPointer));
+}
+
+fetch('wasm_basic_test.wasm')
+ .then(response => response.arrayBuffer())
+ .then(bufferSource => WebAssembly.instantiate(bufferSource, importObject))
+ .then(({instance}) => {
+ Module.asm = instance.exports;
+ wasmImports.emscripten_notify_memory_growth();
+ assertEqual(3, Module.asm.add(1, 2));
+ // Include space for null terminating byte.
+ const helloStringPointer = Module.asm.malloc(7);
+ Module.asm.getHello(helloStringPointer);
+ assertEqual("Hello.", stringFromHeap(helloStringPointer));
+ Module.asm.free(helloStringPointer);
+ success();
+ })
+ .catch(fail);
+
+setupFinished();
+ </script>
+</body>
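The stringFromHeap helper above reads a NUL-terminated UTF-8 string out of the module's linear memory. The same idea in a short Python sketch (the fake heap bytes are illustrative):

    def string_from_heap(heap, pointer):
      """Decodes a NUL-terminated UTF-8 string starting at `pointer`."""
      end = pointer
      while heap[end]:  # scan for the terminating zero byte
        end += 1
      return heap[pointer:end].decode('utf-8')

    heap = b'\x00\x00Hello.\x00\xff'  # "Hello." stored at offset 2
    assert string_from_heap(heap, 2) == 'Hello.'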
diff --git a/cobalt/black_box_tests/testdata/wasm_basic_test.wasm b/cobalt/black_box_tests/testdata/wasm_basic_test.wasm
new file mode 100755
index 0000000..61e7da2
--- /dev/null
+++ b/cobalt/black_box_tests/testdata/wasm_basic_test.wasm
Binary files differ
diff --git a/cobalt/black_box_tests/testdata/web_worker_test.html b/cobalt/black_box_tests/testdata/web_worker_test.html
index 9be8f5b..f4d264f 100644
--- a/cobalt/black_box_tests/testdata/web_worker_test.html
+++ b/cobalt/black_box_tests/testdata/web_worker_test.html
@@ -19,95 +19,99 @@
functionality. This can be expanded as more functionality is implemented,
but probably will be superseded by Web Platform Tests.
-->
+
<head>
- <title>Cobalt Web Worker Test</title>
- <script src='black_box_js_test_utils.js'></script>
+ <title>Cobalt Web Worker Test</title>
+ <script src='black_box_js_test_utils.js'></script>
</head>
<body>
-<script>
+ <script>
var window_error_event_count = 0;
+ var worker_error_event_count = 0;
window.onerror = function (message) {
- window_error_event_count += 1;
- assertIncludes('TypeError: self.Foo is not a function', message);
- console.log('window got onerror', message);
+ window_error_event_count += 1;
+ console.error('window got onerror', message);
+ notReached();
};
var message_event_count = 0;
- console.log('running');
- // This is expected trigger a an error event on window.
+ // This is expected to trigger an error event on the Worker object.
var worker_with_error = new Worker('web_worker_test_with_syntax_error.js',
- { name : 'worker_with_error'});
+ { name: 'worker_with_error' });
+ worker_with_error.onerror = function (event) {
+ worker_error_event_count += 1;
+ assertIncludes('TypeError: self.Foo is not a function', event.message);
+ };
- var worker = new Worker('web_worker_test.js', { name : 'test_worker'});
- console.log(worker);
- worker.onmessage = function (event) {
- message_event_count += 1;
- console.log('window got message', message_event_count, 'from worker:', event.data);
- switch (message_event_count) {
- case 1:
- assertEqual('web worker test loaded', event.data);
- break;
- case 2:
- // The first script is expected to execute even though the next
- // one has a syntax error.
- assertEqual('Imported Script 1', event.data);
- break;
- case 3:
- assertEqual('Imported Script Before Syntax Error', event.data);
- break;
- case 4:
- assertEqual('Expected exception message 4: SyntaxError', event.data);
- break;
- case 5:
- assertEqual('Expected exception message 5: SyntaxError', event.data);
- worker.postMessage('import scripts now');
- worker.postMessage('just some data');
- worker.postMessage('a special message');
- break;
- case 6:
- // Scripts loaded with importScripts are guaranteed to execute
- // in order.
- assertEqual('Imported Script 1', event.data);
- break;
- case 7:
- assertEqual('Imported Script 2', event.data);
- break;
- case 8:
- assertEqual('Imported Script 3', event.data);
- break;
- case 9:
- assertEqual('worker received import scripts now', event.data);
- break;
- case 10:
- assertEqual('IMPORT SCRIPTS NOW', event.data);
- break;
- case 11:
- assertEqual('worker received just some data', event.data);
- break;
- case 12:
- assertEqual('JUST SOME DATA', event.data);
- break;
- case 13:
- assertEqual('worker received a special message', event.data);
- break;
- case 14:
- assertEqual('A SPECIAL MESSAGE', event.data);
- worker.terminate();
- window.setTimeout(
- () => {
- assertEqual(14, message_event_count);
- assertEqual(1, window_error_event_count);
- onEndTest();
- }, 250);
- break;
- }
- };
+ var worker = new Worker('web_worker_test.js', { name: 'test_worker' });
worker.onerror = function (event) {
- console.log('window got onerror');
- notReached();
+ worker_error_event_count += 1;
+ console.error('worker got onerror', event.message);
+ notReached();
};
- console.log('end');
-</script>
+ worker.onmessage = function (event) {
+ message_event_count += 1;
+ switch (message_event_count) {
+ case 1:
+ assertEqual('web worker test loaded', event.data);
+ break;
+ case 2:
+ // The first script is expected to execute even though the next
+ // one has a syntax error.
+ assertEqual('Imported Script 1', event.data);
+ break;
+ case 3:
+ assertEqual('Imported Script Before Syntax Error', event.data);
+ break;
+ case 4:
+ assertEqual('Expected exception message 4: SyntaxError', event.data);
+ break;
+ case 5:
+ assertEqual('Expected exception message 5: SyntaxError', event.data);
+ worker.postMessage('import scripts now');
+ worker.postMessage('just some data');
+ worker.postMessage('a special message');
+ break;
+ case 6:
+ // Scripts loaded with importScripts are guaranteed to execute
+ // in order.
+ assertEqual('Imported Script 1', event.data);
+ break;
+ case 7:
+ assertEqual('Imported Script 2', event.data);
+ break;
+ case 8:
+ assertEqual('Imported Script 3', event.data);
+ break;
+ case 9:
+ assertEqual('worker received import scripts now', event.data);
+ break;
+ case 10:
+ assertEqual('IMPORT SCRIPTS NOW', event.data);
+ break;
+ case 11:
+ assertEqual('worker received just some data', event.data);
+ break;
+ case 12:
+ assertEqual('JUST SOME DATA', event.data);
+ break;
+ case 13:
+ assertEqual('worker received a special message', event.data);
+ break;
+ case 14:
+ assertEqual('A SPECIAL MESSAGE', event.data);
+ worker.terminate();
+ window.setTimeout(
+ () => {
+ assertEqual(14, message_event_count);
+ assertEqual(0, window_error_event_count);
+ assertEqual(1, worker_error_event_count);
+ onEndTest();
+ }, 250);
+ break;
+ }
+ };
+ </script>
</body>
diff --git a/cobalt/black_box_tests/testdata/web_worker_test.js b/cobalt/black_box_tests/testdata/web_worker_test.js
index 8e029a6..ea5a527 100644
--- a/cobalt/black_box_tests/testdata/web_worker_test.js
+++ b/cobalt/black_box_tests/testdata/web_worker_test.js
@@ -16,10 +16,8 @@
self.postMessage(data);
self.onmessage = function (event) {
let message = `worker received ${event.data}`;
- console.log(message);
if (event.data == 'import scripts now') {
// These should load and execute synchronously.
- console.log('Worker importing scripts.');
self.importScripts('web_worker_test_importscripts_1.js',
'web_worker_test_importscripts_2.js',
'web_worker_test_importscripts_3.js');
@@ -38,7 +36,6 @@
'web_worker_test_importscripts_3.js');
} catch (e) {
message = 'Expected exception message 4: ' + e;
- console.log(message);
self.postMessage(message);
}
@@ -46,7 +43,6 @@
self.importScripts('...:...');
} catch (e) {
message = 'Expected exception message 5: ' + e;
- console.log(message);
self.postMessage(message);
}
diff --git a/cobalt/black_box_tests/testdata/web_worker_test_importscripts_1.js b/cobalt/black_box_tests/testdata/web_worker_test_importscripts_1.js
index 54c48cd..29aa77a 100644
--- a/cobalt/black_box_tests/testdata/web_worker_test_importscripts_1.js
+++ b/cobalt/black_box_tests/testdata/web_worker_test_importscripts_1.js
@@ -12,5 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-console.log('worker test importscripts 1');
self.postMessage('Imported Script 1');
diff --git a/cobalt/black_box_tests/testdata/web_worker_test_importscripts_2.js b/cobalt/black_box_tests/testdata/web_worker_test_importscripts_2.js
index a32f94b..ea539ba 100644
--- a/cobalt/black_box_tests/testdata/web_worker_test_importscripts_2.js
+++ b/cobalt/black_box_tests/testdata/web_worker_test_importscripts_2.js
@@ -12,5 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-console.log('worker test importscripts 2');
self.postMessage('Imported Script 2');
diff --git a/cobalt/black_box_tests/testdata/web_worker_test_importscripts_3.js b/cobalt/black_box_tests/testdata/web_worker_test_importscripts_3.js
index 3d59d1c..43e714c 100644
--- a/cobalt/black_box_tests/testdata/web_worker_test_importscripts_3.js
+++ b/cobalt/black_box_tests/testdata/web_worker_test_importscripts_3.js
@@ -12,5 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-console.log('worker test importscripts 3');
self.postMessage('Imported Script 3');
diff --git a/cobalt/black_box_tests/testdata/web_worker_test_importscripts_with_syntax_error.js b/cobalt/black_box_tests/testdata/web_worker_test_importscripts_with_syntax_error.js
index 49275c8..743ec51 100644
--- a/cobalt/black_box_tests/testdata/web_worker_test_importscripts_with_syntax_error.js
+++ b/cobalt/black_box_tests/testdata/web_worker_test_importscripts_with_syntax_error.js
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-console.log('worker test importscripts with syntax error');
self.postMessage('Imported Script Before Syntax Error');
self.Foo('Bar');
self.postMessage('Imported Script After Syntax Error');
diff --git a/cobalt/black_box_tests/testdata/worker_csp_test.html b/cobalt/black_box_tests/testdata/worker_csp_test.html
index ba09f97..1b2d0b4 100644
--- a/cobalt/black_box_tests/testdata/worker_csp_test.html
+++ b/cobalt/black_box_tests/testdata/worker_csp_test.html
@@ -22,31 +22,37 @@
<body>
<script>
- var worker;
- var window_onerror_count = 0;
- window.onerror = (message, filename, lineno, colno, error) => {
- ++window_onerror_count;
- // Note: Worker execution errors currently don't pass line or column
- // number in the error message.
- assertIncludes('SecurityError', message);
- assertIncludes('worker_csp_test.js', filename);
- assertEqual(1, window_onerror_count);
- window.setTimeout(
- () => {
- worker.terminate();
- onEndTest();
- }, 250);
- }
+ var worker;
+ var window_onerror_count = 0;
+ var worker_onerror_count = 0;
+ window.onerror = (message, filename, lineno, colno, error) => {
+ ++window_onerror_count;
+ console.log('window got onerror', message);
+ notReached();
+ }
- // This worker attempts to do an XHR request that is blocked by CSP.
- worker = new Worker('worker_csp_test.js');
- worker.onmessage = function (event) {
- notReached();
- };
- worker.onerror = function (event) {
- // Note: The Worker's onerror handler (incorrectly) isn't called.
- notReached();
- };
+ // This worker attempts to do an XHR request that is blocked by CSP.
+ worker = new Worker('worker_csp_test.js');
+ worker.onmessage = function (event) {
+ console.log('worker got onmessage', event.data);
+ notReached();
+ };
+ worker.onerror = function (event) {
+ ++worker_onerror_count;
+ console.log('worker got onerror', event.message);
+ // Note: Worker execution errors currently don't pass line or column
+ // number in the error message.
+ assertIncludes('SecurityError', event.message);
+ assertIncludes('worker_csp_test.js', event.filename);
+ assertEqual(1, worker_onerror_count);
+ window.setTimeout(
+ () => {
+ assertEqual(1, worker_onerror_count);
+ assertEqual(0, window_onerror_count);
+ worker.terminate();
+ onEndTest();
+ }, 250);
+ };
</script>
</body>
diff --git a/cobalt/black_box_tests/testdata/worker_load_csp_test.html b/cobalt/black_box_tests/testdata/worker_load_csp_test.html
index f265f05..6defe4b 100644
--- a/cobalt/black_box_tests/testdata/worker_load_csp_test.html
+++ b/cobalt/black_box_tests/testdata/worker_load_csp_test.html
@@ -22,35 +22,39 @@
<body>
<script>
- var window_onerror_count = 0;
- window.onerror = (message, filename, lineno, colno, error) => {
- ++window_onerror_count;
- if (message.includes('blocked_worker.js')) {
- assertIncludes('rejected by security policy', message);
- assertIncludes('worker_load_csp_test.html', filename);
- } else {
+ var window_onerror_count = 0;
+ var worker_onerror_count = 0;
+ window.onerror = (message, filename, lineno, colno, error) => {
+ ++window_onerror_count;
+ console.log('window got onerror', message);
notReached();
}
- if (window_onerror_count == 1) {
- window.setTimeout(
- () => {
- assertEqual(1, window_onerror_count);
- onEndTest();
- }, 250);
- }
- }
+ // This worker is blocked because the URL isn't allowed by CSP.
+ try {
+ var blocked_worker = new Worker('https://www.google.com/blocked_worker.js');
+ blocked_worker.onerror = function (event) {
+ ++worker_onerror_count;
+ console.log('worker got onerror', event.message);
+ if (event.message.includes('blocked_worker.js')) {
+ assertIncludes('rejected by security policy', event.message);
+ assertEqual('https://www.google.com/blocked_worker.js', event.filename);
+ } else {
+ notReached();
+ }
+ if (worker_onerror_count == 1) {
+ window.setTimeout(
+ () => {
+ assertEqual(1, worker_onerror_count);
+ assertEqual(0, window_onerror_count);
+ onEndTest();
+ }, 250);
- // This worker is blocked because the URL isn't allowed by CSP.
- try {
- var blocked_worker = new Worker('https://www.google.com/blocked_worker.js');
- blocked_worker.onerror = function (event) {
- // Note: The Worker's onerror handler (incorrectly) isn't called.
+ }
+ };
+ } catch (error) {
+ // The error is thrown asynchronously after the Worker constructor.
notReached();
- };
- } catch (error) {
- // The error is thrown asynchronously after the Worker constructor.
- notReached();
- }
+ }
</script>
</body>
diff --git a/cobalt/black_box_tests/testdata/worker_load_test.html b/cobalt/black_box_tests/testdata/worker_load_test.html
index 7f13191..dc4aeac 100644
--- a/cobalt/black_box_tests/testdata/worker_load_test.html
+++ b/cobalt/black_box_tests/testdata/worker_load_test.html
@@ -22,45 +22,48 @@
<body>
<script>
- var window_onerror_count = 0;
- window.onerror = (message, filename, lineno, colno, error) => {
- ++window_onerror_count;
- console.log(message);
- if (message.includes('nonexisting_worker.js')) {
- assertIncludes('aborted or failed with code 404', message);
- assertIncludes('worker_load_test.html', filename);
- } else {
+ var window_onerror_count = 0;
+ var worker_onerror_count = 0;
+ window.onerror = (message, filename, lineno, colno, error) => {
+ ++window_onerror_count;
+ console.log('window got onerror', message);
notReached();
}
- if (window_onerror_count == 1) {
- window.setTimeout(
- () => {
- assertEqual(1, window_onerror_count);
- onEndTest();
- }, 250);
- }
- }
-
- // This worker is blocked because the URL can't resolve.
- try {
- var blocked_worker = new Worker('..:/blocked_worker.js');
- notReached();
- } catch(error) {
- console.log(error);
- assertEqual('SyntaxError', error.name);
- }
-
- // This worker is blocked because the script does not exist.
- try {
- var nonexisting_worker = new Worker('nonexisting_worker.js');
- nonexisting_worker.onerror = function (event) {
- // Note: The Worker's onerror handler (incorrectly) isn't called.
+ // This worker is blocked because the URL can't resolve.
+ try {
+ var blocked_worker = new Worker('..:/blocked_worker.js');
notReached();
- };
- } catch (error) {
- // The error is thrown asynchronously after the Worker constructor.
- notReached();
- }
+ } catch (error) {
+ console.log(error);
+ assertEqual('SyntaxError', error.name);
+ }
+
+ // This worker is blocked because the script does not exist.
+ try {
+ var nonexisting_worker = new Worker('nonexisting_worker.js');
+ nonexisting_worker.onerror = function (event) {
+ ++worker_onerror_count;
+ console.log(event.message);
+ if (event.message.includes('nonexisting_worker.js')) {
+ assertIncludes('aborted or failed with code 404', event.message);
+ assertIncludes('/testdata/nonexisting_worker.js', event.filename);
+ } else {
+ notReached();
+ }
+ if (worker_onerror_count == 1) {
+ window.setTimeout(
+ () => {
+ assertEqual(1, worker_onerror_count);
+ assertEqual(0, window_onerror_count);
+ onEndTest();
+ }, 250);
+
+ }
+ };
+ } catch (error) {
+ // The error is thrown asynchronously after the Worker constructor.
+ notReached();
+ }
</script>
</body>
diff --git a/cobalt/black_box_tests/tests/compression_test.py b/cobalt/black_box_tests/tests/compression_test.py
index 505cc45..27ea24a 100644
--- a/cobalt/black_box_tests/tests/compression_test.py
+++ b/cobalt/black_box_tests/tests/compression_test.py
@@ -33,7 +33,7 @@
return encode_deflate(raw_content)
elif encoding_type == 'br':
return encode_brotli(raw_content)
- raise ValueError('Unknown encoding type used [{}].'.format(encoding_type))
+ raise ValueError(f'Unknown encoding type used [{encoding_type}].')
def encode_gzip(raw_content):
diff --git a/cobalt/black_box_tests/tests/evergreen_verify_qa_channel_update_test.py b/cobalt/black_box_tests/tests/evergreen_verify_qa_channel_update_test.py
new file mode 100644
index 0000000..12e422b
--- /dev/null
+++ b/cobalt/black_box_tests/tests/evergreen_verify_qa_channel_update_test.py
@@ -0,0 +1,68 @@
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests successful update to Cobalt binary available on the test channel."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from cobalt.black_box_tests import black_box_tests
+from cobalt.black_box_tests.threaded_web_server import ThreadedWebServer
+
+
+class EvergreenVerifyQaChannelUpdateTest(black_box_tests.BlackBoxTestCase):
+
+ def test_evergreen_verify_qa_channel_update(self):
+ with ThreadedWebServer(binding_address=self.GetBindingAddress()) as server:
+ url = server.GetURL(
+ file_name='testdata/evergreen_test.html?resetInstallations=true')
+ # Resetting the installations doesn't require an update check.
+ with self.CreateCobaltRunner(
+ url=url,
+ target_params=['--update_check_delay_seconds=300'],
+ loader_target='loader_app') as runner:
+ runner.WaitForJSTestsSetup()
+ self.assertTrue(runner.JSTestsSucceeded())
+
+ url = server.GetURL(
+ file_name='testdata/evergreen_test.html?channel=test&status=Update installed, pending restart' # pylint: disable=line-too-long
+ .replace(' ', '%20'))
+ # 100 seconds provides enough time for the initial update delay, the prod
+ # channel update, and the target channel update.
+ with self.CreateCobaltRunner(
+ url=url, poll_until_wait_seconds=100,
+ loader_target='loader_app') as runner:
+ runner.WaitForJSTestsSetup()
+ self.assertTrue(runner.JSTestsSucceeded())
+
+ url = server.GetURL(
+ file_name='testdata/evergreen_test.html?channel=test&status=App is up to date' # pylint: disable=line-too-long
+ .replace(' ', '%20'))
+ # 60 seconds provides enough time for the initial update delay and target
+ # channel update check.
+ with self.CreateCobaltRunner(
+ url=url, poll_until_wait_seconds=60,
+ loader_target='loader_app') as runner:
+ runner.WaitForJSTestsSetup()
+ self.assertTrue(runner.JSTestsSucceeded())
+
+ url = server.GetURL(
+ file_name='testdata/evergreen_test.html?resetInstallations=true')
+ # Resetting the installations doesn't require an update check.
+ with self.CreateCobaltRunner(
+ url=url,
+ target_params=['--update_check_delay_seconds=300'],
+ loader_target='loader_app') as runner:
+ runner.WaitForJSTestsSetup()
+ self.assertTrue(runner.JSTestsSucceeded())
diff --git a/cobalt/black_box_tests/tests/override_ua_parameters.py b/cobalt/black_box_tests/tests/override_ua_parameters.py
index 3dec188..bc9eb3f 100644
--- a/cobalt/black_box_tests/tests/override_ua_parameters.py
+++ b/cobalt/black_box_tests/tests/override_ua_parameters.py
@@ -43,9 +43,9 @@
'foo.bar.baz.qux/21.2.1.41.0'
if not ua_request_header == expected_ua_request_header:
- raise ValueError('UA string in HTTP request header does not match with '\
- 'UA params overrides specified in command line\n'\
- 'UA string in HTTP request header:%s' % (ua_request_header))
+ raise ValueError('UA string in HTTP request header does not match with '
+ 'UA params overrides specified in command line\n'
+ f'UA string in HTTP request header:{ua_request_header}')
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
diff --git a/cobalt/black_box_tests/tests/wasm_basic_test.py b/cobalt/black_box_tests/tests/wasm_basic_test.py
new file mode 100644
index 0000000..2515b53
--- /dev/null
+++ b/cobalt/black_box_tests/tests/wasm_basic_test.py
@@ -0,0 +1,48 @@
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests Cobalt can load and use WebAssembly."""
+
+import logging
+from cobalt.black_box_tests import black_box_tests
+from cobalt.black_box_tests.threaded_web_server import ThreadedWebServer
+
+PLATFORMS_SUPPORTED = [
+ 'stub',
+ 'linux-x64x11',
+ 'linux-x64x11-egl',
+ 'linux-x64x11-gcc-6-3',
+ 'linux-x64x11-skia',
+ 'android-arm',
+ 'android-arm64',
+ 'android-arm64-vulkan',
+ 'android-x86',
+ 'raspi-2',
+ 'raspi-2-skia',
+ 'win-win32',
+ 'linux-x64x11-clang-crosstool',
+]
+
+
+class WasmBasicTest(black_box_tests.BlackBoxTestCase):
+
+ def test_wasm_basic(self):
+ if self.launcher_params.platform not in PLATFORMS_SUPPORTED:
+ logging.warning('wasm_basic_test disabled for platform:%s',
+ self.launcher_params.platform)
+ return
+
+ with ThreadedWebServer(binding_address=self.GetBindingAddress()) as server:
+ url = server.GetURL(file_name='testdata/wasm_basic_test.html')
+ with self.CreateCobaltRunner(url=url) as runner:
+ self.assertTrue(runner.JSTestsSucceeded())
diff --git a/cobalt/black_box_tests/tests/web_debugger.py b/cobalt/black_box_tests/tests/web_debugger.py
index 5d2d209..f55a037 100644
--- a/cobalt/black_box_tests/tests/web_debugger.py
+++ b/cobalt/black_box_tests/tests/web_debugger.py
@@ -28,6 +28,7 @@
from cobalt.black_box_tests import black_box_tests
from cobalt.black_box_tests.threaded_web_server import ThreadedWebServer
+from starboard.tools import config
sys.path.append(
os.path.join(
@@ -43,16 +44,15 @@
"""Exception when an error response is received for a command."""
def __init__(self, error):
- code = '[{}] '.format(error['code']) if 'code' in error else ''
- super(DebuggerCommandError, self).__init__(code + error['message'])
+ code = f"[{error['code']}] " if 'code' in error else ''
+ super().__init__(code + error['message'])
class DebuggerEventError(Exception):
"""Exception when an unexpected event is received."""
def __init__(self, expected, actual):
- super(DebuggerEventError,
- self).__init__('Waiting for {} but got {}'.format(expected, actual))
+ super().__init__(f'Waiting for {expected} but got {actual}')
class JavaScriptError(Exception):
@@ -63,7 +63,7 @@
ex = exception_details.get('exception', {})
fallback = ex.get('className', 'Unknown error') + ' (No description)'
msg = ex.get('description', fallback)
- super(JavaScriptError, self).__init__(msg)
+ super().__init__(msg)
class DebuggerConnection(object):
@@ -130,10 +130,10 @@
while command_id not in self.responses:
try:
self._receive_message()
- except websocket.WebSocketTimeoutException:
+ except websocket.WebSocketTimeoutException as e:
method = self.commands[command_id]['method']
raise DebuggerCommandError(
- {'message': 'Timeout waiting for response to ' + method})
+ {'message': 'Timeout waiting for response to ' + method}) from e
self.commands.pop(command_id)
return self.responses.pop(command_id)
@@ -147,7 +147,7 @@
# "Debugger.scriptParsed" events get sent as artifacts of the debugger
# backend implementation running its own injected code, so they are ignored
# unless that's what we're waiting for.
- allow_script_parsed = (method == 'Debugger.scriptParsed')
+ allow_script_parsed = method == 'Debugger.scriptParsed'
# Pop already-received events from the event queue.
def _next_event():
@@ -165,8 +165,8 @@
break
try:
self._receive_message()
- except websocket.WebSocketTimeoutException:
- raise DebuggerEventError(method, 'None (timeout)')
+ except websocket.WebSocketTimeoutException as e:
+ raise DebuggerEventError(method, 'None (timeout)') from e
if method != event['method']:
raise DebuggerEventError(method, event['method'])
return event
@@ -216,13 +216,13 @@
"""Test interaction with the web debugger over a WebSocket."""
def set_up_with(self, cm):
- val = cm.__enter__()
+ val = cm.__enter__() # pylint: disable=unnecessary-dunder-call
self.addCleanup(cm.__exit__, None, None, None)
return val
def setUp(self):
- cobalt_vars = self.cobalt_config.GetVariables(self.launcher_params.config)
- if not cobalt_vars['enable_debugger']:
+ is_gold_config = self.launcher_params.config == config.Config.GOLD
+ if is_gold_config:
self.skipTest('DevTools is disabled on this platform')
self.server = self.set_up_with(
@@ -600,7 +600,7 @@
# (replace all children of <div#B>)
inner_html = "<div id='D'>\\n</div>"
self.debugger.evaluate_js('b = document.getElementById("B");'
- 'b.innerHTML = "%s"' % inner_html)
+ f'b.innerHTML = "{inner_html}"')
removed_event = self.debugger.wait_event('DOM.childNodeRemoved')
self.assertEqual(moved_span_b1['nodeId'], removed_event['params']['nodeId'])
inserted_event = self.debugger.wait_event('DOM.childNodeInserted')
@@ -896,6 +896,7 @@
[
'asyncBreak',
'promiseThen',
+ 'promiseTimeout',
],
[
'waitPromise',
@@ -909,6 +910,7 @@
[
'asyncBreak',
'asyncAwait',
+ 'promiseTimeout',
],
[
'asyncAwait',
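The raise ... from e changes above keep the original WebSocketTimeoutException attached as __cause__, so the chained traceback records why the debugger error was raised. A minimal illustration of the pattern with stand-in exception types:

    class CommandTimeout(Exception):
      """Stand-in for DebuggerCommandError; illustrative only."""


    def wait_for_response():
      try:
        raise TimeoutError('socket timed out')  # stand-in for the websocket timeout
      except TimeoutError as e:
        # Chaining preserves the timeout instead of discarding it.
        raise CommandTimeout('Timeout waiting for response') from e


    try:
      wait_for_response()
    except CommandTimeout as err:
      assert isinstance(err.__cause__, TimeoutError)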
diff --git a/cobalt/black_box_tests/tests/web_platform_tests.py b/cobalt/black_box_tests/tests/web_platform_tests.py
index d5e1933..dc11142 100644
--- a/cobalt/black_box_tests/tests/web_platform_tests.py
+++ b/cobalt/black_box_tests/tests/web_platform_tests.py
@@ -70,8 +70,7 @@
if used_filters:
if 'gtest_filter' not in ' '.join(self.launcher_params.target_params):
- target_params.append('--gtest_filter=-{}'.format(
- ':'.join(used_filters)))
+ target_params.append(f"--gtest_filter=-{':'.join(used_filters)}")
if self.launcher_params.target_params:
target_params += self.launcher_params.target_params
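The join above yields a single negative gtest filter; e.g., with two hypothetical filtered tests:

    used_filters = ['FetchTest.Run/0', 'CorsTest.Run/3']  # hypothetical names
    print(f"--gtest_filter=-{':'.join(used_filters)}")
    # --gtest_filter=-FetchTest.Run/0:CorsTest.Run/3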
diff --git a/cobalt/browser/BUILD.gn b/cobalt/browser/BUILD.gn
index 72217bd..0b66445 100644
--- a/cobalt/browser/BUILD.gn
+++ b/cobalt/browser/BUILD.gn
@@ -114,6 +114,8 @@
"application.h",
"browser_module.cc",
"browser_module.h",
+ "client_hint_headers.cc",
+ "client_hint_headers.h",
"device_authentication.cc",
"device_authentication.h",
"lifecycle_observer.h",
@@ -231,6 +233,7 @@
has_pedantic_warnings = true
sources = [
+ "client_hint_headers_test.cc",
"device_authentication_test.cc",
"memory_settings/auto_mem_settings_test.cc",
"memory_settings/auto_mem_test.cc",
@@ -412,14 +415,3 @@
cache_templates("cached_jinja_templates") {
output_dir = _bindings_scripts_output_dir
}
-
-target(final_executable_type, "snapshot_app_stats") {
- sources = [ "snapshot_app_stats.cc" ]
- deps = [
- ":browser",
- ":browser_switches",
- ":cobalt",
- "//cobalt/base",
- "//third_party/protobuf:protobuf_lite",
- ]
-}
diff --git a/cobalt/browser/application.cc b/cobalt/browser/application.cc
index 4156bdd..3f5f413 100644
--- a/cobalt/browser/application.cc
+++ b/cobalt/browser/application.cc
@@ -55,6 +55,7 @@
#include "cobalt/base/window_on_offline_event.h"
#include "cobalt/base/window_on_online_event.h"
#include "cobalt/base/window_size_changed_event.h"
+#include "cobalt/browser/client_hint_headers.h"
#include "cobalt/browser/device_authentication.h"
#include "cobalt/browser/memory_settings/auto_mem_settings.h"
#include "cobalt/browser/memory_tracker/tool.h"
@@ -72,6 +73,8 @@
#include "cobalt/system_window/input_event.h"
#include "cobalt/trace_event/scoped_trace_to_file.h"
#include "cobalt/watchdog/watchdog.h"
+#include "starboard/common/device_type.h"
+#include "starboard/common/system_property.h"
#include "starboard/configuration.h"
#include "starboard/event.h"
#include "starboard/extension/crash_handler.h"
@@ -120,10 +123,17 @@
// Default to INADDR_ANY
std::string listen_ip(ip_v6 ? "::" : "0.0.0.0");
+#if SB_API_VERSION < 15
// Desktop PCs default to loopback.
if (SbSystemGetDeviceType() == kSbSystemDeviceTypeDesktopPC) {
listen_ip = ip_v6 ? "::1" : "127.0.0.1";
}
+#else
+ if (starboard::GetSystemPropertyString(kSbSystemPropertyDeviceType) ==
+ starboard::kSystemDeviceTypeDesktopPC) {
+ listen_ip = ip_v6 ? "::1" : "127.0.0.1";
+ }
+#endif
#if defined(ENABLE_DEBUG_COMMAND_LINE_SWITCHES)
base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
@@ -506,6 +516,7 @@
struct SecurityFlags {
csp::CSPHeaderPolicy csp_header_policy;
network::HTTPSRequirement https_requirement;
+ network::CORSPolicy cors_policy;
};
// |non_trivial_static_fields| will be lazily created on the first time it's
@@ -521,7 +532,7 @@
"available trackers.";
#endif // defined(ENABLE_DEBUGGER) && defined(STARBOARD_ALLOWS_MEMORY_TRACKING)
-void AddCrashHandlerAnnotations() {
+void AddCrashHandlerAnnotations(const UserAgentPlatformInfo& platform_info) {
auto crash_handler_extension =
static_cast<const CobaltExtensionCrashHandlerApi*>(
SbSystemGetExtension(kCobaltExtensionCrashHandlerName));
@@ -530,7 +541,6 @@
return;
}
- auto platform_info = cobalt::browser::GetUserAgentPlatformInfoFromSystem();
std::string user_agent =
cobalt::browser::CreateUserAgentString(platform_info);
std::string version = "";
@@ -589,6 +599,28 @@
}
}
+void AddCrashHandlerApplicationState(base::ApplicationState state) {
+ auto crash_handler_extension =
+ static_cast<const CobaltExtensionCrashHandlerApi*>(
+ SbSystemGetExtension(kCobaltExtensionCrashHandlerName));
+ if (!crash_handler_extension) {
+ DLOG(INFO) << "No crash handler extension, not sending application state.";
+ return;
+ }
+
+ std::string application_state = std::string(GetApplicationStateString(state));
+ application_state.push_back('\0');
+
+ if (crash_handler_extension->version > 1) {
+ if (crash_handler_extension->SetString("application_state",
+ application_state.c_str())) {
+ DLOG(INFO) << "Sent application state to crash handler.";
+ return;
+ }
+ }
+ DLOG(ERROR) << "Could not send application state to crash handler.";
+}
+
} // namespace
// Helper stub to disable histogram tracking in StatisticsRecorder
@@ -779,7 +811,8 @@
// User can specify an extra search path entry for files loaded via file://.
options.web_module_options.web_options.extra_web_file_dir =
GetExtraWebFileDir();
- SecurityFlags security_flags{csp::kCSPRequired, network::kHTTPSRequired};
+ SecurityFlags security_flags{csp::kCSPRequired, network::kHTTPSRequired,
+ network::kCORSRequired};
// Set callback to be notified when a navigation occurs that destroys the
// underlying WebModule.
options.web_module_created_callback =
@@ -808,9 +841,14 @@
security_flags.csp_header_policy = csp::kCSPOptional;
}
+ if (command_line->HasSwitch(browser::switches::kAllowAllCrossOrigin)) {
+ security_flags.cors_policy = network::kCORSOptional;
+ }
+
if (command_line->HasSwitch(browser::switches::kProd)) {
security_flags.https_requirement = network::kHTTPSRequired;
security_flags.csp_header_policy = csp::kCSPRequired;
+ security_flags.cors_policy = network::kCORSRequired;
}
if (command_line->HasSwitch(switches::kVideoPlaybackRateMultiplier)) {
@@ -851,7 +889,12 @@
security_flags.csp_header_policy = csp::kCSPRequired;
#endif // defined(COBALT_FORCE_CSP)
+#if defined(COBALT_FORCE_CORS)
+ security_flags.cors_policy = network::kCORSRequired;
+#endif // defined(COBALT_FORCE_CORS)
+
network_module_options.https_requirement = security_flags.https_requirement;
+ network_module_options.cors_policy = security_flags.cors_policy;
options.web_module_options.csp_header_policy =
security_flags.csp_header_policy;
options.web_module_options.csp_enforcement_type = web::kCspEnforcementEnable;
@@ -867,11 +910,13 @@
storage_manager_.reset(new storage::StorageManager(storage_manager_options));
+ cobalt::browser::UserAgentPlatformInfo platform_info;
+
network_module_.reset(new network::NetworkModule(
- CreateUserAgentString(GetUserAgentPlatformInfoFromSystem()),
+ CreateUserAgentString(platform_info), GetClientHintHeaders(platform_info),
storage_manager_.get(), &event_dispatcher_, network_module_options));
- AddCrashHandlerAnnotations();
+ AddCrashHandlerAnnotations(platform_info);
#if SB_IS(EVERGREEN)
if (SbSystemGetExtension(kCobaltExtensionInstallationManagerName) &&
@@ -948,14 +993,12 @@
base::Bind(&Application::OnWindowOnOfflineEvent, base::Unretained(this));
event_dispatcher_.AddEventCallback(base::WindowOnOfflineEvent::TypeId(),
on_window_on_offline_event_callback_);
-#if SB_API_VERSION >= 13
on_date_time_configuration_changed_event_callback_ =
base::Bind(&Application::OnDateTimeConfigurationChangedEvent,
base::Unretained(this));
event_dispatcher_.AddEventCallback(
base::DateTimeConfigurationChangedEvent::TypeId(),
on_date_time_configuration_changed_event_callback_);
-#endif
#if defined(ENABLE_WEBDRIVER)
#if defined(ENABLE_DEBUG_COMMAND_LINE_SWITCHES)
@@ -1038,11 +1081,9 @@
event_dispatcher_.RemoveEventCallback(
base::AccessibilityCaptionSettingsChangedEvent::TypeId(),
on_caption_settings_changed_event_callback_);
-#if SB_API_VERSION >= 13
event_dispatcher_.RemoveEventCallback(
base::DateTimeConfigurationChangedEvent::TypeId(),
on_date_time_configuration_changed_event_callback_);
-#endif
}
void Application::Start(SbTimeMonotonic timestamp) {
@@ -1083,7 +1124,6 @@
// Create a Cobalt event from the Starboard event, if recognized.
switch (starboard_event->type) {
-#if SB_API_VERSION >= 13
case kSbEventTypeBlur:
case kSbEventTypeFocus:
case kSbEventTypeConceal:
@@ -1093,15 +1133,6 @@
case kSbEventTypeLowMemory:
OnApplicationEvent(starboard_event->type, starboard_event->timestamp);
break;
-#else
- case kSbEventTypePause:
- case kSbEventTypeUnpause:
- case kSbEventTypeSuspend:
- case kSbEventTypeResume:
- case kSbEventTypeLowMemory:
- OnApplicationEvent(starboard_event->type, SbTimeGetMonotonicNow());
- break;
-#endif // SB_API_VERSION >= 13
case kSbEventTypeWindowSizeChanged:
DispatchEventInternal(new base::WindowSizeChangedEvent(
static_cast<SbEventWindowSizeChangedData*>(starboard_event->data)
@@ -1132,47 +1163,30 @@
*static_cast<int*>(starboard_event->data)));
break;
case kSbEventTypeLink: {
-#if SB_API_VERSION >= 13
DispatchDeepLink(static_cast<const char*>(starboard_event->data),
starboard_event->timestamp);
-#else // SB_API_VERSION >= 13
- DispatchDeepLink(static_cast<const char*>(starboard_event->data),
- SbTimeGetMonotonicNow());
-#endif // SB_API_VERSION >= 13
break;
}
-#if SB_API_VERSION >= 13
case kSbEventTypeAccessibilitySettingsChanged:
-#else
- case kSbEventTypeAccessiblitySettingsChanged:
-#endif // SB_API_VERSION >= 13
DispatchEventInternal(new base::AccessibilitySettingsChangedEvent());
break;
case kSbEventTypeAccessibilityCaptionSettingsChanged:
DispatchEventInternal(
new base::AccessibilityCaptionSettingsChangedEvent());
break;
-#if SB_API_VERSION >= 13
case kSbEventTypeAccessibilityTextToSpeechSettingsChanged:
-#else
- case kSbEventTypeAccessiblityTextToSpeechSettingsChanged:
-#endif // SB_API_VERSION >= 13
DispatchEventInternal(
new base::AccessibilityTextToSpeechSettingsChangedEvent());
break;
-#if SB_API_VERSION >= 13
case kSbEventTypeOsNetworkDisconnected:
DispatchEventInternal(new base::WindowOnOfflineEvent());
break;
case kSbEventTypeOsNetworkConnected:
DispatchEventInternal(new base::WindowOnOnlineEvent());
break;
-#endif
-#if SB_API_VERSION >= 13
case kSbEventDateTimeConfigurationChanged:
DispatchEventInternal(new base::DateTimeConfigurationChangedEvent());
break;
-#endif
// Explicitly list unhandled cases here so that the compiler can give a
// warning when a value is added, but not handled.
case kSbEventTypeInput:
@@ -1198,6 +1212,7 @@
case kSbEventTypeStop:
LOG(INFO) << "Got quit event.";
if (watchdog) watchdog->UpdateState(base::kApplicationStateStopped);
+ AddCrashHandlerApplicationState(base::kApplicationStateStopped);
Quit();
LOG(INFO) << "Finished quitting.";
break;
@@ -1207,7 +1222,6 @@
browser_module_->Focus(timestamp);
LOG(INFO) << "Finished starting.";
break;
-#if SB_API_VERSION >= 13
case kSbEventTypeBlur:
LOG(INFO) << "Got blur event.";
browser_module_->Blur(timestamp);
@@ -1245,37 +1259,6 @@
#endif
LOG(INFO) << "Finished unfreezing.";
break;
-#else
- case kSbEventTypePause:
- LOG(INFO) << "Got pause event.";
- browser_module_->Blur(timestamp);
- LOG(INFO) << "Finished pausing.";
- break;
- case kSbEventTypeUnpause:
- LOG(INFO) << "Got unpause event.";
- browser_module_->Focus(timestamp);
- LOG(INFO) << "Finished unpausing.";
- break;
- case kSbEventTypeSuspend:
- LOG(INFO) << "Got suspend event.";
- browser_module_->Conceal(timestamp);
- browser_module_->Freeze(timestamp);
-#if SB_IS(EVERGREEN)
- if (updater_module_) updater_module_->Suspend();
-#endif
- LOG(INFO) << "Finished suspending.";
- break;
- case kSbEventTypeResume:
- DCHECK(SbSystemSupportsResume());
- LOG(INFO) << "Got resume event.";
- browser_module_->Unfreeze(timestamp);
- browser_module_->Reveal(timestamp);
-#if SB_IS(EVERGREEN)
- if (updater_module_) updater_module_->Resume();
-#endif
- LOG(INFO) << "Finished resuming.";
- break;
-#endif // SB_API_VERSION >= 13
case kSbEventTypeLowMemory:
DLOG(INFO) << "Got low memory event.";
browser_module_->ReduceMemory();
@@ -1285,37 +1268,26 @@
case kSbEventTypePreload:
case kSbEventTypeWindowSizeChanged:
case kSbEventTypeAccessibilityCaptionSettingsChanged:
-#if SB_API_VERSION >= 13
case kSbEventTypeAccessibilityTextToSpeechSettingsChanged:
-#else
- case kSbEventTypeAccessiblityTextToSpeechSettingsChanged:
-#endif // SB_API_VERSION >= 13
case kSbEventTypeOnScreenKeyboardBlurred:
case kSbEventTypeOnScreenKeyboardFocused:
case kSbEventTypeOnScreenKeyboardHidden:
case kSbEventTypeOnScreenKeyboardShown:
case kSbEventTypeOnScreenKeyboardSuggestionsUpdated:
-#if SB_API_VERSION >= 13
case kSbEventTypeAccessibilitySettingsChanged:
-#else
- case kSbEventTypeAccessiblitySettingsChanged:
-#endif // SB_API_VERSION >= 13
case kSbEventTypeInput:
case kSbEventTypeLink:
case kSbEventTypeScheduled:
case kSbEventTypeUser:
case kSbEventTypeVerticalSync:
-#if SB_API_VERSION >= 13
case kSbEventTypeOsNetworkDisconnected:
case kSbEventTypeOsNetworkConnected:
-#endif
-#if SB_API_VERSION >= 13
case kSbEventDateTimeConfigurationChanged:
-#endif
NOTREACHED() << "Unexpected event type: " << event_type;
return;
}
if (watchdog) watchdog->UpdateState(browser_module_->GetApplicationState());
+ AddCrashHandlerApplicationState(browser_module_->GetApplicationState());
}
void Application::OnWindowSizeChangedEvent(const base::Event* event) {
@@ -1394,7 +1366,6 @@
base::polymorphic_downcast<const base::WindowOnOfflineEvent*>(event));
}
-#if SB_API_VERSION >= 13
void Application::OnDateTimeConfigurationChangedEvent(
const base::Event* event) {
TRACE_EVENT0("cobalt::browser",
@@ -1403,7 +1374,6 @@
base::polymorphic_downcast<
const base::DateTimeConfigurationChangedEvent*>(event));
}
-#endif
void Application::MainWebModuleCreated(WebModule* web_module) {
TRACE_EVENT0("cobalt::browser", "Application::MainWebModuleCreated()");
@@ -1552,26 +1522,20 @@
DispatchEventInternal(new base::DeepLinkEvent(
deep_link, base::Bind(&Application::OnDeepLinkConsumedCallback,
base::Unretained(this), deep_link)));
-#if SB_API_VERSION >= 13
if (browser_module_) {
browser_module_->SetDeepLinkTimestamp(timestamp);
}
-#endif // SB_API_VERSION >= 13
}
void Application::DispatchDeepLinkIfNotConsumed() {
std::string deep_link;
-#if SB_API_VERSION >= 13
SbTimeMonotonic timestamp;
-#endif // SB_API_VERSION >= 13
// This block exists to ensure that the lock is held while accessing
// unconsumed_deep_link_.
{
base::AutoLock auto_lock(unconsumed_deep_link_lock_);
deep_link = unconsumed_deep_link_;
-#if SB_API_VERSION >= 13
timestamp = deep_link_timestamp_;
-#endif // SB_API_VERSION >= 13
}
if (!deep_link.empty()) {
@@ -1580,11 +1544,9 @@
deep_link, base::Bind(&Application::OnDeepLinkConsumedCallback,
base::Unretained(this), deep_link)));
}
-#if SB_API_VERSION >= 13
if (browser_module_) {
browser_module_->SetDeepLinkTimestamp(timestamp);
}
-#endif // SB_API_VERSION >= 13
}
} // namespace browser
diff --git a/cobalt/browser/application.h b/cobalt/browser/application.h
index 96480b7..04a08ca 100644
--- a/cobalt/browser/application.h
+++ b/cobalt/browser/application.h
@@ -85,9 +85,7 @@
void OnWindowOnOnlineEvent(const base::Event* event);
void OnWindowOnOfflineEvent(const base::Event* event);
-#if SB_API_VERSION >= 13
void OnDateTimeConfigurationChangedEvent(const base::Event* event);
-#endif
// Called when a navigation occurs in the BrowserModule.
void MainWebModuleCreated(WebModule* web_module);
@@ -123,13 +121,9 @@
base::EventCallback on_screen_keyboard_blurred_event_callback_;
base::EventCallback on_screen_keyboard_suggestions_updated_event_callback_;
base::EventCallback on_caption_settings_changed_event_callback_;
-#if SB_API_VERSION >= SB_NETWORK_EVENT_VERSION
base::EventCallback on_window_on_online_event_callback_;
base::EventCallback on_window_on_offline_event_callback_;
-#endif
-#if SB_API_VERSION >= 13
base::EventCallback on_date_time_configuration_changed_event_callback_;
-#endif
// Thread checkers to ensure that callbacks for network and application events
// always occur on the same thread.
diff --git a/cobalt/browser/browser_module.cc b/cobalt/browser/browser_module.cc
index e4e1481..a4f3934 100644
--- a/cobalt/browser/browser_module.cc
+++ b/cobalt/browser/browser_module.cc
@@ -280,7 +280,6 @@
#endif
on_error_retry_count_(0),
waiting_for_error_retry_(false),
- will_quit_(false),
application_state_(initial_application_state),
main_web_module_generation_(0),
next_timeline_id_(1),
@@ -691,6 +690,13 @@
on_load_event_time_ = base::TimeTicks::Now().ToInternalValue();
+#if SB_IS(EVERGREEN)
+ if (updater_module_) {
+ // Mark the Evergreen update successful if this is Evergreen-Full.
+ updater_module_->MarkSuccessful();
+ }
+#endif
+
web_module_loaded_.Signal();
options_.persistent_settings->ValidatePersistentSettings();
@@ -915,11 +921,7 @@
return;
}
#endif
-#if SB_API_VERSION >= 13
SbSystemRequestConceal();
-#else
- SbSystemRequestSuspend();
-#endif // SB_API_VERSION >= 13
}
void BrowserModule::OnWindowSizeChanged(const ViewportSize& viewport_size) {
@@ -996,7 +998,6 @@
}
}
-#if SB_API_VERSION >= 13
void BrowserModule::OnDateTimeConfigurationChanged(
const base::DateTimeConfigurationChangedEvent* event) {
DCHECK_EQ(base::MessageLoop::current(), self_message_loop_);
@@ -1005,7 +1006,6 @@
web_module_->UpdateDateTimeConfiguration();
}
}
-#endif
#if defined(ENABLE_DEBUGGER)
void BrowserModule::OnFuzzerToggle(const std::string& message) {
@@ -1309,12 +1309,7 @@
}
} else if (event.ctrl_key() && event.key_code() == dom::keycode::kS) {
if (type == base::Tokens::keydown()) {
-#if SB_API_VERSION >= 13
SbSystemRequestConceal();
-#else
- // Ctrl+S suspends Cobalt.
- SbSystemRequestSuspend();
-#endif // SB_API_VERSION >= 13
}
return false;
}
@@ -1614,12 +1609,8 @@
InstantiateRendererModule();
if (media_module_) {
-#if SB_API_VERSION >= 13
media_module_->UpdateSystemWindowAndResourceProvider(system_window_.get(),
GetResourceProvider());
-#else
- media_module_->Resume(GetResourceProvider());
-#endif // SB_API_VERSION >= 13
} else {
options_.media_module_options.allow_resume_after_suspend =
SbSystemSupportsResume();
@@ -1758,12 +1749,10 @@
// Suspend media module and update resource provider.
if (media_module_) {
-#if SB_API_VERSION >= 13
// This needs to be done before destroying the renderer module as it
// may use the renderer module to release assets during the update.
media_module_->UpdateSystemWindowAndResourceProvider(NULL,
GetResourceProvider());
-#endif // SB_API_VERSION >= 13
}
if (renderer_module_) {
@@ -1772,13 +1761,11 @@
DestroyRendererModule();
}
-#if SB_API_VERSION >= 13
// Reset system window after renderer module destroyed.
if (media_module_) {
input_device_manager_.reset();
system_window_.reset();
}
-#endif // SB_API_VERSION >= 13
}
void BrowserModule::FreezeInternal(SbTimeMonotonic timestamp) {
@@ -1815,11 +1802,9 @@
void BrowserModule::UnfreezeInternal(SbTimeMonotonic timestamp) {
TRACE_EVENT0("cobalt::browser", "BrowserModule::UnfreezeInternal()");
-// Set the Stub resource provider to media module and to web module
-// at Concealed state.
-#if SB_API_VERSION >= 13
+ // Set the stub resource provider on the media module and the web module
+ // while in the Concealed state.
if (media_module_) media_module_->Resume(GetResourceProvider());
-#endif // SB_API_VERSION >= 13
FOR_EACH_OBSERVER(LifecycleObserver, lifecycle_observers_,
Unfreeze(GetResourceProvider(), timestamp));
@@ -1847,10 +1832,8 @@
#endif // defined(ENABLE_DEBUGGER)
web_module_ready_to_freeze &&
application_state_ == base::kApplicationStateConcealed) {
-#if SB_API_VERSION >= 13
DLOG(INFO) << "System request to freeze the app.";
SbSystemRequestFreeze();
-#endif // SB_API_VERSION >= 13
}
}
diff --git a/cobalt/browser/browser_module.h b/cobalt/browser/browser_module.h
index 05aece7..4b01a90 100644
--- a/cobalt/browser/browser_module.h
+++ b/cobalt/browser/browser_module.h
@@ -221,10 +221,8 @@
void OnWindowOnOnlineEvent(const base::Event* event);
void OnWindowOnOfflineEvent(const base::Event* event);
-#if SB_API_VERSION >= 13
void OnDateTimeConfigurationChanged(
const base::DateTimeConfigurationChangedEvent* event);
-#endif
bool IsWebModuleLoaded() { return web_module_loaded_.IsSignaled(); }
@@ -688,15 +686,6 @@
// resume until the error retry occurs.
bool waiting_for_error_retry_;
- // Set when the application is about to quit. May be set from a thread other
- // than the one hosting this object, and read from another.
- bool will_quit_;
-
- // The |will_quit_| flag may be set from one thread (e.g. not the one hosting
- // this object) and read from another. This lock is used to
- // ensure synchronous access.
- base::Lock quit_lock_;
-
// The current application state.
base::ApplicationState application_state_;
diff --git a/cobalt/browser/client_hint_headers.cc b/cobalt/browser/client_hint_headers.cc
new file mode 100644
index 0000000..8dc7dcc
--- /dev/null
+++ b/cobalt/browser/client_hint_headers.cc
@@ -0,0 +1,55 @@
+// Copyright 2023 The Cobalt Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "cobalt/browser/client_hint_headers.h"
+
+#include <string>
+#include <vector>
+
+#include "base/strings/strcat.h"
+
+namespace cobalt {
+namespace browser {
+namespace {
+
+// Note: we intentionally prefix all headers with 'Co' for Cobalt.
+const char kCHPrefix[] = "Sec-CH-UA-Co-";
+
+// Helper function to check for empty values and add header prefix.
+void AddHeader(std::vector<std::string>& request_headers,
+ const std::string& header, const std::string& value) {
+ if (!value.empty()) {
+ request_headers.push_back(base::StrCat({kCHPrefix, header, ":", value}));
+ }
+}
+
+} // namespace
+
+// This function is expected to be deterministic and not to depend on global
+// variables or state. If global state must be referenced, it should be
+// captured when |platform_info| is created instead.
+std::vector<std::string> GetClientHintHeaders(
+ const UserAgentPlatformInfo& platform_info) {
+ std::vector<std::string> headers;
+
+ AddHeader(headers, "Firmware-Version-Details",
+ platform_info.firmware_version_details());
+
+ AddHeader(headers, "OS-Experience", platform_info.os_experience());
+
+ return headers;
+}
+
+} // namespace browser
+} // namespace cobalt
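
The new file above builds each client hint as a single "Name:Value" string, prefixed with `Sec-CH-UA-Co-` and skipping empty values (concrete output strings appear in the unit test added below). The following standalone C++ sketch is illustrative only and not part of this change; `SplitClientHintHeader` is a hypothetical helper, and the sample strings are copied from that test. It shows why a consumer turning these strings back into name/value pairs should split on the first ':' only, since values such as the firmware details may themselves contain ':'.

```cpp
// Illustrative sketch only; not part of the change above.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical helper: splits a "Header-Name:value" string, as produced by
// AddHeader() in client_hint_headers.cc, into a (name, value) pair.
std::pair<std::string, std::string> SplitClientHintHeader(
    const std::string& header) {
  // Split on the first ':' only, so values containing ':' stay intact.
  const std::string::size_type pos = header.find(':');
  if (pos == std::string::npos) {
    return {header, std::string()};
  }
  return {header.substr(0, pos), header.substr(pos + 1)};
}

int main() {
  // Sample strings copied from client_hint_headers_test.cc below.
  const std::vector<std::string> headers = {
      "Sec-CH-UA-Co-Firmware-Version-Details:abc/def:123.456/xy-z",
      "Sec-CH-UA-Co-OS-Experience:Amati"};
  for (const std::string& header : headers) {
    const auto name_value = SplitClientHintHeader(header);
    std::cout << name_value.first << " => " << name_value.second << "\n";
  }
  return 0;
}
```

Keeping name and value in one delimited string lets `GetClientHintHeaders()` stay a pure function of |platform_info|, leaving any header-object construction to the networking layer that consumes it.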
diff --git a/cobalt/browser/client_hint_headers.h b/cobalt/browser/client_hint_headers.h
new file mode 100644
index 0000000..6e77039
--- /dev/null
+++ b/cobalt/browser/client_hint_headers.h
@@ -0,0 +1,34 @@
+// Copyright 2023 The Cobalt Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COBALT_BROWSER_CLIENT_HINT_HEADERS_H_
+#define COBALT_BROWSER_CLIENT_HINT_HEADERS_H_
+
+#include <string>
+#include <vector>
+
+#include "cobalt/browser/user_agent_platform_info.h"
+
+namespace cobalt {
+namespace browser {
+
+// This function is deterministic and non-dependent on global variables and
+// state. It is dependent only on |platform_info|.
+std::vector<std::string> GetClientHintHeaders(
+ const UserAgentPlatformInfo& platform_info);
+
+} // namespace browser
+} // namespace cobalt
+
+#endif // COBALT_BROWSER_CLIENT_HINT_HEADERS_H_
diff --git a/cobalt/browser/client_hint_headers_test.cc b/cobalt/browser/client_hint_headers_test.cc
new file mode 100644
index 0000000..f96626a
--- /dev/null
+++ b/cobalt/browser/client_hint_headers_test.cc
@@ -0,0 +1,43 @@
+// Copyright 2023 The Cobalt Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "cobalt/browser/client_hint_headers.h"
+
+#include "cobalt/browser/user_agent_platform_info.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cobalt {
+namespace browser {
+
+namespace {
+
+using ::testing::UnorderedElementsAre;
+
+TEST(ClientHintHeadersTest, GetClientHintHeaders) {
+ UserAgentPlatformInfo platform_info;
+ platform_info.set_firmware_version_details("abc/def:123.456/xy-z");
+ platform_info.set_os_experience("Amati");
+
+ std::vector<std::string> headers = GetClientHintHeaders(platform_info);
+ EXPECT_THAT(headers,
+ UnorderedElementsAre(
+ "Sec-CH-UA-Co-Firmware-Version-Details:abc/def:123.456/xy-z",
+ "Sec-CH-UA-Co-OS-Experience:Amati"));
+}
+
+} // namespace
+
+} // namespace browser
+} // namespace cobalt
diff --git a/cobalt/browser/device_authentication.cc b/cobalt/browser/device_authentication.cc
index 050857e..bf01d14 100644
--- a/cobalt/browser/device_authentication.cc
+++ b/cobalt/browser/device_authentication.cc
@@ -32,25 +32,6 @@
constexpr size_t kSHA256DigestSize = 32;
-#if SB_API_VERSION < 13
-bool ComputeSignatureWithSystemPropertySecret(const std::string& message,
- uint8_t* signature) {
- const size_t kBase64EncodedCertificationSecretLength = 1023;
- char base_64_secret_property[kBase64EncodedCertificationSecretLength + 1] = {
- 0};
- bool result = SbSystemGetProperty(
- kSbSystemPropertyBase64EncodedCertificationSecret,
- base_64_secret_property, kBase64EncodedCertificationSecretLength);
- if (!result) {
- return false;
- }
-
- ComputeHMACSHA256SignatureWithProvidedKey(message, base_64_secret_property,
- signature, kSHA256DigestSize);
- return true;
-}
-#endif // SB_API_VERSION < 13
-
bool ComputeSignatureFromSignAPI(const std::string& message,
uint8_t* signature) {
return SbSystemSignWithCertificationSecretKey(
@@ -68,10 +49,6 @@
if (ComputeSignatureFromSignAPI(message, signature)) {
DLOG(INFO) << "Using certification signature provided by "
<< "SbSystemSignWithCertificationSecretKey().";
-#if SB_API_VERSION < 13
- } else if (ComputeSignatureWithSystemPropertySecret(message, signature)) {
- DLOG(INFO) << "Using certification key from SbSystemGetProperty().";
-#endif // SB_API_VERSION < 13
} else {
return std::string();
}
diff --git a/cobalt/browser/lifecycle_console_commands.cc b/cobalt/browser/lifecycle_console_commands.cc
index 6e8253c..3784e29 100644
--- a/cobalt/browser/lifecycle_console_commands.cc
+++ b/cobalt/browser/lifecycle_console_commands.cc
@@ -50,7 +50,6 @@
"ending the process (peacefully).";
namespace {
-#if SB_API_VERSION >= 13
// This is temporary and will be changed in later CLs; it maps Starboard
// Concealed state support onto Cobalt builds without Concealed state support
// so that the former can be tested.
@@ -63,17 +62,6 @@
<< "reveal Cobalt using a platform-specific method.";
SbSystemRequestConceal();
}
-#else
-void OnPause(const std::string& message) { SbSystemRequestPause(); }
-
-void OnUnpause(const std::string& message) { SbSystemRequestUnpause(); }
-
-void OnSuspend(const std::string& message) {
- LOG(INFO) << "Suspending Cobalt through the console, but you will need to "
- << "resume Cobalt using a platform-specific method.";
- SbSystemRequestSuspend();
-}
-#endif // SB_API_VERSION >= 13
void OnQuit(const std::string& /*message*/) { SbSystemRequestStop(0); }
} // namespace
diff --git a/cobalt/browser/main.cc b/cobalt/browser/main.cc
index 371613c..7f87bc0 100644
--- a/cobalt/browser/main.cc
+++ b/cobalt/browser/main.cc
@@ -63,9 +63,8 @@
}
LOG(INFO) << "Concealing application.";
DCHECK(!g_application);
- g_application = new cobalt::browser::Application(quit_closure,
- true /*should_preload*/,
- timestamp);
+ g_application = new cobalt::browser::Application(
+ quit_closure, true /*should_preload*/, timestamp);
DCHECK(g_application);
}
@@ -77,22 +76,10 @@
return;
}
LOG(INFO) << "Starting application.";
-#if SB_API_VERSION >= 13
DCHECK(!g_application);
- g_application = new cobalt::browser::Application(quit_closure,
- false /*not_preload*/,
- timestamp);
+ g_application = new cobalt::browser::Application(
+ quit_closure, false /*not_preload*/, timestamp);
DCHECK(g_application);
-#else
- if (!g_application) {
- g_application = new cobalt::browser::Application(quit_closure,
- false /*should_preload*/,
- timestamp);
- DCHECK(g_application);
- } else {
- g_application->Start(timestamp);
- }
-#endif // SB_API_VERSION >= 13
}
void StopApplication() {
diff --git a/cobalt/browser/snapshot_app_stats.cc b/cobalt/browser/snapshot_app_stats.cc
deleted file mode 100644
index e52b721..0000000
--- a/cobalt/browser/snapshot_app_stats.cc
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2015 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <cstdio>
-#include <map>
-
-#include "base/command_line.h"
-#include "base/threading/thread.h"
-#include "base/time/time.h"
-#include "cobalt/base/c_val.h"
-#include "cobalt/base/wrap_main.h"
-#include "cobalt/browser/application.h"
-#include "cobalt/browser/switches.h"
-
-namespace {
-
-// How many seconds to wait after starting the application before taking the
-// snapshot of CVal values.
-const int kSecondsToWait = 20;
-
-// The list of CVals we are interested in reporting.
-//
-// Note that if you add an item to this list, you should also add a column
-// to the stats tracking table in the database. Please see the document titled
-// "Adding a SnapshotAppStats column" on the Cobalt intranet home page for
-// examples on how to add a new column.
-
-// The naming convention for CVal columns in the database are the same as the
-// name of the CVal except with the dots removed and the CVal name converted to
-// camel case. For example, "DOM.TokenLists" would become "dOMTokenLists".
-//
-// A document that explains how to modify the database in more detail can be
-// found called "Dashboard TDD" on the Cobalt intranet home page.
-
-const char* g_cvals_to_snapshot[] = {
- "Count.DOM.Nodes",
- "Count.DOM.TokenLists",
- "Count.XHR",
- "Memory.ArrayBuffer",
- "Memory.CPU.Exe",
- "Memory.CPU.Used",
- "Memory.GraphicsPS3.Fixed.Size",
- "Memory.JS",
- "Memory.MainWebModule.ImageCache.Size",
- "Memory.MainWebModule.RemoteTypefaceCache.Size",
- "Memory.Media.AudioDecoder",
- "Memory.Media.MediaSource.CPU.Fixed.Capacity",
- "Memory.Media.VideoDecoder",
- "Memory.XHR",
-};
-
-PRINTF_FORMAT(1, 2) void Output(const char* fmt, ...) {
- va_list ap;
- va_start(ap, fmt);
-
- std::vfprintf(stdout, fmt, ap);
-
- va_end(ap);
-
- std::fflush(stdout);
-}
-
-typedef std::map<std::string, std::string> CValsMap;
-
-// Returns all CVals along with their values.
-CValsMap GetAllCValValues() {
- base::CValManager* cvm = base::CValManager::GetInstance();
- std::set<std::string> cvals = cvm->GetOrderedCValNames();
- CValsMap cvals_with_values;
-
- for (std::set<std::string>::iterator iter = cvals.begin();
- iter != cvals.end(); ++iter) {
- base::Optional<std::string> cval_value = cvm->GetValueAsString(*iter);
- if (cval_value) {
- cvals_with_values[*iter] = *cval_value;
- }
- }
-
- return cvals_with_values;
-}
-
-// Grab a snapshot of all current CVals and their values and then output them
-// so that they can be analyzed by humans and inserted into a database where
-// the values can be graphed.
-void DoStatsSnapshot(cobalt::browser::Application* application) {
- Output("---Benchmark Results Start---\n");
- Output("{\n");
- Output(" \"LiveYouTubeAfter%dSecondsStatsSnapshot\": {\n", kSecondsToWait);
-
- CValsMap cval_values = GetAllCValValues();
- bool have_printed_results = false;
- for (size_t i = 0; i < arraysize(g_cvals_to_snapshot); ++i) {
- CValsMap::iterator found = cval_values.find(g_cvals_to_snapshot[i]);
- if (found != cval_values.end()) {
- if (have_printed_results) {
- Output(",\n");
- } else {
- have_printed_results = true;
- }
- Output(" \"%s\": %s", found->first.c_str(), found->second.c_str());
- }
- }
-
- Output("\n");
-
- Output(" }\n");
- Output("}\n");
- Output("---Benchmark Results End---\n");
-
- application->Quit();
-}
-
-} // namespace
-
-namespace {
-
-cobalt::browser::Application* g_application = NULL;
-base::Thread* g_snapshot_thread = NULL;
-
-void StartApplication(int argc, char* argv[], const char* link,
- const base::Closure& quit_closure,
- SbTimeMonotonic timestamp) {
- logging::SetMinLogLevel(100);
-
- // Use null storage for our savegame so that we don't persist state from
- // one run to another, which makes the measurements more deterministic (e.g.
- // we will not consistently register for experiments via cookies).
- base::CommandLine::ForCurrentProcess()->AppendSwitch(
- cobalt::browser::switches::kNullSavegame);
- base::CommandLine::ForCurrentProcess()->AppendSwitchASCII(
- cobalt::browser::switches::kDebugConsoleMode, "off");
-
- // Create the application object just like is done in the Cobalt main app.
- g_application =
- new cobalt::browser::Application(quit_closure, false /*should_preload*/,
- timestamp);
-
- // Create a thread to start a timer for kSecondsToWait seconds after which
- // we will take a snapshot of the CVals at that time and then quit the
- // application.
- g_snapshot_thread = new base::Thread("StatsSnapshot");
- g_snapshot_thread->Start();
- g_snapshot_thread->message_loop()->task_runner()->PostDelayedTask(
- FROM_HERE, base::Bind(&DoStatsSnapshot, g_application),
- base::TimeDelta::FromSeconds(kSecondsToWait));
-}
-
-void StopApplication() {
- g_snapshot_thread->Stop();
- delete g_application;
- g_application = NULL;
-}
-
-} // namespace
-
-COBALT_WRAP_BASE_MAIN(StartApplication, StopApplication);
diff --git a/cobalt/browser/splash_screen.cc b/cobalt/browser/splash_screen.cc
index ea764a6..62e8e4b 100644
--- a/cobalt/browser/splash_screen.cc
+++ b/cobalt/browser/splash_screen.cc
@@ -20,6 +20,7 @@
#include "base/callback.h"
#include "base/cancelable_callback.h"
#include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "cobalt/browser/splash_screen_cache.h"
#include "cobalt/dom/window.h"
@@ -132,7 +133,7 @@
DCHECK(!ShutdownSignaled()) << "Shutdown() should be called at most once.";
if (!on_splash_screen_shutdown_complete_.callback().is_null()) {
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::Bind(on_splash_screen_shutdown_complete_.callback(),
base::TimeDelta()),
diff --git a/cobalt/browser/suspend_fuzzer.cc b/cobalt/browser/suspend_fuzzer.cc
index 4c71c2c..eeebfba 100644
--- a/cobalt/browser/suspend_fuzzer.cc
+++ b/cobalt/browser/suspend_fuzzer.cc
@@ -14,6 +14,8 @@
#include "cobalt/browser/suspend_fuzzer.h"
+#include "base/threading/thread_task_runner_handle.h"
+
namespace cobalt {
namespace browser {
@@ -31,13 +33,8 @@
} // namespace
-#if SB_API_VERSION >= 13
SuspendFuzzer::SuspendFuzzer()
: thread_("suspend_fuzzer"), step_type_(kShouldRequestFreeze) {
-#else
-SuspendFuzzer::SuspendFuzzer()
- : thread_("suspend_fuzzer"), step_type_(kShouldRequestSuspend) {
-#endif // SB_API_VERSION >= 13
thread_.Start();
thread_.message_loop()->task_runner()->PostDelayedTask(
FROM_HERE, base::Bind(&SuspendFuzzer::DoStep, base::Unretained(this)),
@@ -48,7 +45,6 @@
void SuspendFuzzer::DoStep() {
DCHECK(base::MessageLoop::current() == thread_.message_loop());
-#if SB_API_VERSION >= 13
if (step_type_ == kShouldRequestFreeze) {
SB_DLOG(INFO) << "suspend_fuzzer: Requesting freeze.";
SbSystemRequestFreeze();
@@ -60,21 +56,8 @@
} else {
NOTREACHED();
}
-#else
- if (step_type_ == kShouldRequestSuspend) {
- SB_DLOG(INFO) << "suspend_fuzzer: Requesting suspend.";
- SbSystemRequestSuspend();
- step_type_ = kShouldRequestSuspend;
- } else if (step_type_ == kShouldRequestUnpause) {
- SB_DLOG(INFO) << "suspend_fuzzer: Requesting unpause.";
- SbSystemRequestUnpause();
- step_type_ = kShouldRequestUnpause;
- } else {
- NOTREACHED();
- }
-#endif // SB_API_VERSION >= 13
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&SuspendFuzzer::DoStep, base::Unretained(this)),
kInterval);
}
diff --git a/cobalt/browser/suspend_fuzzer.h b/cobalt/browser/suspend_fuzzer.h
index d11ecd9..19b5053 100644
--- a/cobalt/browser/suspend_fuzzer.h
+++ b/cobalt/browser/suspend_fuzzer.h
@@ -37,13 +37,8 @@
base::Thread thread_;
enum StepType {
-#if SB_API_VERSION >= 13
kShouldRequestFreeze,
kShouldRequestFocus,
-#else
- kShouldRequestSuspend,
- kShouldRequestUnpause,
-#endif // SB_API_VERSION >= 13
} step_type_;
};
diff --git a/cobalt/browser/switches.cc b/cobalt/browser/switches.cc
index e746329..e455eeb 100644
--- a/cobalt/browser/switches.cc
+++ b/cobalt/browser/switches.cc
@@ -147,6 +147,11 @@
"Forbid Cobalt to start without receiving csp headers which is enabled by "
"default in production.";
+const char kAllowAllCrossOrigin[] = "allow_all_cross_origin";
+const char kAllowAllCrossOriginHelp[] =
+ "Disables SOP checks and allows all cross-origin connections in "
+ "development builds.";
+
const char kRequireHTTPSLocation[] = "require_https";
const char kRequireHTTPSLocationHelp[] =
"Ask Cobalt to only accept https url which is enabled by default in "
@@ -458,6 +463,7 @@
{kMinCompatibilityVersion, kMinCompatibilityVersionHelp},
{kNullSavegame, kNullSavegameHelp}, {kProd, kProdHelp},
{kRequireCSP, kRequireCSPHelp},
+ {kAllowAllCrossOrigin, kAllowAllCrossOriginHelp},
{kRequireHTTPSLocation, kRequireHTTPSLocationHelp},
{kShutdownAfter, kShutdownAfterHelp},
{kStubImageDecoder, kStubImageDecoderHelp},
diff --git a/cobalt/browser/switches.h b/cobalt/browser/switches.h
index 859b52c..c1401d7 100644
--- a/cobalt/browser/switches.h
+++ b/cobalt/browser/switches.h
@@ -36,6 +36,8 @@
extern const char kWaitForWebDebuggerHelp[];
#endif // ENABLE_DEBUGGER
+extern const char kAllowAllCrossOrigin[];
+extern const char kAllowAllCrossOriginHelp[];
extern const char kDisableImageAnimations[];
extern const char kDisableImageAnimationsHelp[];
extern const char kForceDeterministicRendering[];
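
The two hunks above only declare and register the new `--allow_all_cross_origin` switch and its help text; the code that actually honors the flag is outside this section. As a hedged sketch (the enclosing helper below is hypothetical, not Cobalt code), a consumer would typically check it through the standard `base::CommandLine` accessor, roughly like this:

```cpp
// Hypothetical consumer sketch; not part of the change above.
#include "base/command_line.h"
#include "cobalt/browser/switches.h"

namespace cobalt {
namespace browser {

// Returns true if the developer asked to bypass same-origin checks via
// --allow_all_cross_origin on the command line.
bool AllowAllCrossOriginRequested() {
  return base::CommandLine::ForCurrentProcess()->HasSwitch(
      switches::kAllowAllCrossOrigin);
}

}  // namespace browser
}  // namespace cobalt
```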
diff --git a/cobalt/browser/user_agent_platform_info.cc b/cobalt/browser/user_agent_platform_info.cc
index e89f79e..30b4b39 100644
--- a/cobalt/browser/user_agent_platform_info.cc
+++ b/cobalt/browser/user_agent_platform_info.cc
@@ -27,6 +27,8 @@
#include "cobalt_build_id.h" // NOLINT(build/include_subdir)
#include "starboard/common/log.h"
#include "starboard/common/string.h"
+#include "starboard/common/system_property.h"
+#include "starboard/extension/platform_info.h"
#if SB_IS(EVERGREEN)
#include "starboard/extension/installation_manager.h"
#endif // SB_IS(EVERGREEN)
@@ -35,6 +37,8 @@
#include "cobalt/updater/utils.h"
#endif
+using starboard::kSystemPropertyMaxLength;
+
namespace cobalt {
namespace browser {
@@ -106,6 +110,8 @@
namespace {
+#if SB_API_VERSION < 15
+
struct DeviceTypeName {
SbSystemDeviceType device_type;
char device_type_string[10];
@@ -145,6 +151,7 @@
return kSbSystemDeviceTypeUnknown;
}
#endif
+#endif // SB_API_VERSION < 15
static bool isAsciiAlphaDigit(int c) {
return base::IsAsciiAlpha(c) || base::IsAsciiDigit(c);
@@ -216,7 +223,6 @@
info.set_starboard_version(
base::StringPrintf("Starboard/%d", SB_API_VERSION));
- const size_t kSystemPropertyMaxLength = 1024;
char value[kSystemPropertyMaxLength];
bool result;
@@ -265,6 +271,22 @@
}
#endif
+ // Additional Platform Info
+ auto platform_info_extension =
+ static_cast<const CobaltExtensionPlatformInfoApi*>(
+ SbSystemGetExtension(kCobaltExtensionPlatformInfoName));
+ if (platform_info_extension &&
+ strcmp(platform_info_extension->name, kCobaltExtensionPlatformInfoName) ==
+ 0 &&
+ platform_info_extension->version >= 1) {
+ result = platform_info_extension->GetFirmwareVersionDetails(
+ value, kSystemPropertyMaxLength);
+ if (result) {
+ info.set_firmware_version_details(value);
+ }
+ info.set_os_experience(platform_info_extension->GetOsExperience());
+ }
+
info.set_cobalt_version(COBALT_VERSION);
info.set_cobalt_build_version_number(COBALT_BUILD_VERSION_NUMBER);
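
The hunk above probes the optional `CobaltExtensionPlatformInfoApi` Starboard extension and, when it is present with a matching name and version >= 1, forwards its firmware-version details and OS experience into the platform info (and from there into the client hint headers added earlier). The sketch below suggests what a platform-side implementation of the two getters could look like; the authoritative struct layout and signatures live in `starboard/extension/platform_info.h`, so the parameter types and return values here are inferred from this call site and should be treated as assumptions, and the returned strings are illustrative only.

```cpp
// Hypothetical platform-side sketch; the real extension struct is defined in
// starboard/extension/platform_info.h and may differ in detail.
#include <cstddef>
#include <cstring>

namespace {

// Inferred from the call site above: writes a NUL-terminated string into
// |out_value| (at most |value_length| bytes) and returns true on success.
bool GetFirmwareVersionDetails(char* out_value, size_t value_length) {
  const char kDetails[] = "example-board/1.2.3";  // illustrative value
  if (value_length < sizeof(kDetails)) {
    return false;
  }
  std::memcpy(out_value, kDetails, sizeof(kDetails));
  return true;
}

// Inferred from the call site above: returns a short OS-experience label.
const char* GetOsExperience() {
  return "Amati";  // value borrowed from client_hint_headers_test.cc
}

}  // namespace

// A platform would expose these getters through the extension struct returned
// by SbSystemGetExtension(kCobaltExtensionPlatformInfoName).
```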
@@ -286,8 +308,15 @@
info.set_aux_field(value);
}
+#if SB_API_VERSION >= 15
+ result = SbSystemGetProperty(kSbSystemPropertyDeviceType, value,
+ kSystemPropertyMaxLength);
+ SB_DCHECK(result);
+ info.set_device_type(value);
+#else
// Fill platform info if it is a hardware TV device.
info.set_device_type(SbSystemGetDeviceType());
+#endif
// Chipset model number
result = SbSystemGetProperty(kSbSystemPropertyChipsetModelNumber, value,
@@ -354,7 +383,11 @@
info.set_original_design_manufacturer(input.second);
LOG(INFO) << "Set original design manufacturer to " << input.second;
} else if (!input.first.compare("device_type")) {
+#if SB_API_VERSION < 15
info.set_device_type(GetDeviceType(input.second));
+#else
+ info.set_device_type(input.second);
+#endif
LOG(INFO) << "Set device type to " << input.second;
} else if (!input.first.compare("chipset_model_number")) {
info.set_chipset_model_number(input.second);
@@ -389,6 +422,12 @@
} else if (!input.first.compare("evergreen_version")) {
info.set_evergreen_version(input.second);
LOG(INFO) << "Set evergreen version to " << input.second;
+ } else if (!input.first.compare("firmware_version_details")) {
+ info.set_firmware_version_details(input.second);
+ LOG(INFO) << "Set firmware version details to " << input.second;
+ } else if (!input.first.compare("os_experience")) {
+ info.set_os_experience(input.second);
+ LOG(INFO) << "Set os experience to " << input.second;
} else if (!input.first.compare("cobalt_version")) {
info.set_cobalt_version(input.second);
LOG(INFO) << "Set cobalt type to " << input.second;
@@ -429,10 +468,16 @@
Sanitize(original_design_manufacturer, isAsciiAlphaDigit);
}
}
+
+#if SB_API_VERSION < 15
void UserAgentPlatformInfo::set_device_type(SbSystemDeviceType device_type) {
device_type_ = device_type;
device_type_string_ = CreateDeviceTypeString(device_type_);
}
+#endif
+void UserAgentPlatformInfo::set_device_type(const std::string& device_type) {
+ device_type_string_ = device_type;
+}
void UserAgentPlatformInfo::set_chipset_model_number(
base::Optional<std::string> chipset_model_number) {
@@ -497,6 +542,17 @@
evergreen_version_ = Sanitize(evergreen_version, isTCHAR);
}
+void UserAgentPlatformInfo::set_firmware_version_details(
+ const std::string& firmware_version_details) {
+ firmware_version_details_ =
+ Sanitize(firmware_version_details, isVCHARorSpace);
+}
+
+void UserAgentPlatformInfo::set_os_experience(
+ const std::string& os_experience) {
+ os_experience_ = Sanitize(os_experience, isTCHAR);
+}
+
void UserAgentPlatformInfo::set_cobalt_version(
const std::string& cobalt_version) {
cobalt_version_ = Sanitize(cobalt_version, isTCHAR);
diff --git a/cobalt/browser/user_agent_platform_info.h b/cobalt/browser/user_agent_platform_info.h
index d71c7fe..26b0b64 100644
--- a/cobalt/browser/user_agent_platform_info.h
+++ b/cobalt/browser/user_agent_platform_info.h
@@ -43,7 +43,11 @@
base::Optional<std::string> original_design_manufacturer() const override {
return original_design_manufacturer_;
}
+
+#if SB_API_VERSION < 15
SbSystemDeviceType device_type() const override { return device_type_; }
+#endif
+
const std::string& device_type_string() const override {
return device_type_string_;
}
@@ -72,6 +76,10 @@
const std::string& evergreen_version() const override {
return evergreen_version_;
}
+ const std::string& firmware_version_details() const override {
+ return firmware_version_details_;
+ }
+ const std::string& os_experience() const override { return os_experience_; }
const std::string& cobalt_version() const override { return cobalt_version_; }
const std::string& cobalt_build_version_number() const override {
return cobalt_build_version_number_;
@@ -86,7 +94,10 @@
void set_os_name_and_version(const std::string& os_name_and_version);
void set_original_design_manufacturer(
base::Optional<std::string> original_design_manufacturer);
+#if SB_API_VERSION < 15
void set_device_type(SbSystemDeviceType device_type);
+#endif
+ void set_device_type(const std::string& device_type);
void set_chipset_model_number(
base::Optional<std::string> chipset_model_number);
void set_model_year(base::Optional<std::string> model_year);
@@ -100,6 +111,9 @@
void set_evergreen_type(const std::string& evergreen_type);
void set_evergreen_file_type(const std::string& evergreen_file_type);
void set_evergreen_version(const std::string& evergreen_version);
+ void set_firmware_version_details(
+ const std::string& firmware_version_details);
+ void set_os_experience(const std::string& os_experience);
void set_cobalt_version(const std::string& cobalt_version);
void set_cobalt_build_version_number(
const std::string& cobalt_build_version_number);
@@ -109,7 +123,9 @@
std::string starboard_version_;
std::string os_name_and_version_;
base::Optional<std::string> original_design_manufacturer_;
+#if SB_API_VERSION < 15
SbSystemDeviceType device_type_ = kSbSystemDeviceTypeUnknown;
+#endif
std::string device_type_string_;
base::Optional<std::string> chipset_model_number_;
base::Optional<std::string> model_year_;
@@ -122,6 +138,8 @@
std::string evergreen_type_;
std::string evergreen_file_type_;
std::string evergreen_version_;
+ std::string firmware_version_details_; // Only via Client Hints
+ std::string os_experience_; // Only via Client Hints
std::string cobalt_version_;
std::string cobalt_build_version_number_;
diff --git a/cobalt/browser/user_agent_string_test.cc b/cobalt/browser/user_agent_string_test.cc
index 27e232b..ab6a09b 100644
--- a/cobalt/browser/user_agent_string_test.cc
+++ b/cobalt/browser/user_agent_string_test.cc
@@ -12,10 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "cobalt/browser/user_agent_string.h"
+
#include <map>
#include "cobalt/browser/user_agent_platform_info.h"
-#include "cobalt/browser/user_agent_string.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cobalt {
@@ -28,7 +29,7 @@
platform_info.set_starboard_version("");
platform_info.set_os_name_and_version("");
platform_info.set_original_design_manufacturer("");
- platform_info.set_device_type(kSbSystemDeviceTypeUnknown);
+ platform_info.set_device_type("UNKNOWN");
platform_info.set_chipset_model_number("");
platform_info.set_model_year("");
platform_info.set_firmware_version("");
@@ -306,7 +307,7 @@
UserAgentPlatformInfo platform_info =
CreateOnlyOSNameAndVersionPlatformInfo();
platform_info.set_original_design_manufacturer("Aperture_Science_Innovators");
- platform_info.set_device_type(kSbSystemDeviceTypeOverTheTopBox);
+ platform_info.set_device_type("OTT");
platform_info.set_chipset_model_number("P-body/Orange_Atlas/Blue");
platform_info.set_model_year("2013");
platform_info.set_firmware_version("0,01");
@@ -323,7 +324,7 @@
TEST(UserAgentStringFactoryTest, WithOnlyBrandModelAndDeviceType) {
UserAgentPlatformInfo platform_info =
CreateOnlyOSNameAndVersionPlatformInfo();
- platform_info.set_device_type(kSbSystemDeviceTypeOverTheTopBox);
+ platform_info.set_device_type("OTT");
platform_info.set_brand("Aperture Science");
platform_info.set_model("GLaDOS");
std::string user_agent_string = CreateUserAgentString(platform_info);
@@ -336,7 +337,7 @@
TEST(UserAgentStringFactoryTest, WithStarboardVersion) {
UserAgentPlatformInfo platform_info = CreateEmptyPlatformInfo();
platform_info.set_starboard_version("Starboard/6");
- platform_info.set_device_type(kSbSystemDeviceTypeOverTheTopBox);
+ platform_info.set_device_type("OTT");
std::string user_agent_string = CreateUserAgentString(platform_info);
const char* tv_info_str =
@@ -429,7 +430,8 @@
GetUserAgentInputMap(user_agent_input, user_agent_input_map);
std::map<std::string, std::string> expected_user_agent_input_map{
- {"aux_field", ""}, {"device_type", "GAME"},
+ {"aux_field", ""},
+ {"device_type", "GAME"},
};
EXPECT_TRUE(user_agent_input_map == expected_user_agent_input_map);
}
diff --git a/cobalt/browser/web_module.cc b/cobalt/browser/web_module.cc
index f746098..33ca0be 100644
--- a/cobalt/browser/web_module.cc
+++ b/cobalt/browser/web_module.cc
@@ -28,6 +28,7 @@
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_checker.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/c_val.h"
#include "cobalt/base/debugger_hooks.h"
@@ -615,7 +616,7 @@
// Post a task that blocks the message loop and waits for the web debugger.
// This must be posted before the the window's task to load the document.
waiting_for_web_debugger_->store(true);
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&WebModule::Impl::WaitForWebDebugger,
base::Unretained(this)));
}
@@ -949,8 +950,7 @@
LayoutResults layout_results_with_callback(
layout_results.render_tree, layout_results.layout_time,
base::Bind(&WebModule::Impl::OnRenderTreeRasterized,
- base::Unretained(this),
- base::MessageLoop::current()->task_runner(),
+ base::Unretained(this), base::ThreadTaskRunnerHandle::Get(),
last_render_tree_produced_time_));
#if defined(ENABLE_DEBUGGER)
@@ -1027,7 +1027,7 @@
base::Bind(&WebModule::Impl::global_environment, base::Unretained(this)),
base::Bind(&WebModule::Impl::InjectKeyboardEvent, base::Unretained(this)),
base::Bind(&WebModule::Impl::InjectPointerEvent, base::Unretained(this)),
- base::MessageLoop::current()->task_runner()));
+ base::ThreadTaskRunnerHandle::Get()));
}
#endif // defined(ENABLE_WEBDRIVER)
diff --git a/cobalt/build/build_id.py b/cobalt/build/build_id.py
index 1abe051..1c6791f 100755
--- a/cobalt/build/build_id.py
+++ b/cobalt/build/build_id.py
@@ -13,12 +13,9 @@
# limitations under the License.
"""Generate a Cobalt build ID header."""
-import datetime
-import os
import sys
-import time
-template = """
+BUILD_ID_HEADER_TEMPLATE = """
#ifndef _COBALT_BUILD_ID_H_
#define _COBALT_BUILD_ID_H_
@@ -32,7 +29,7 @@
def BuildId(output_path, version_number):
- """Write a Cobalt build_id header file with time and version info.
+ """Write a Cobalt build_id header file with version info.
Args:
output_path: Location of the build id header to write.
@@ -40,21 +37,9 @@
Returns:
0 on success.
"""
- username = os.environ.get('USERNAME', os.environ.get('USER'))
- if not username:
- username = 'unknown'
- timestamp = time.time()
- date_rep = datetime.datetime.fromtimestamp(timestamp).strftime('%c')
-
- with open(output_path, 'w') as f:
- f.write(
- template.format(
- date_rep=date_rep,
- timestamp=int(timestamp),
- version_number=version_number,
- username=username))
- return 0
+ with open(output_path, 'w', encoding='utf-8') as f:
+ f.write(BUILD_ID_HEADER_TEMPLATE.format(version_number=version_number))
if __name__ == '__main__':
- sys.exit(BuildId(sys.argv[1], sys.argv[2]))
+ BuildId(sys.argv[1], sys.argv[2])
diff --git a/cobalt/build/build_number.py b/cobalt/build/build_number.py
deleted file mode 100644
index 7ec534e..0000000
--- a/cobalt/build/build_number.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2012 The Cobalt Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Utilities for use by gyp_cobalt and other build tools."""
-
-import json
-import logging
-import os
-import subprocess
-from six.moves import urllib
-from cobalt.tools import paths
-
-_SUBREPO_PATHS = ['starboard/keyboxes']
-_VERSION_SERVER_URL = 'https://carbon-airlock-95823.appspot.com/build_version/generate' # pylint:disable=line-too-long
-_XSSI_PREFIX = ")]}'\n"
-
-# The path to the build.id file that preserves a build ID.
-BUILD_ID_PATH = os.path.join(paths.BUILD_ROOT, 'build.id')
-
-
-def CheckRevInfo(key, cwd=None):
- git_get_remote_args = ['git', 'config', '--get', 'remote.origin.url']
- remote = subprocess.check_output(
- git_get_remote_args, cwd=cwd).strip().decode('utf-8')
-
- if remote.endswith('.git'):
- remote = remote[:-len('.git')]
-
- git_get_revision_args = ['git', 'rev-parse', 'HEAD']
- revision = subprocess.check_output(
- git_get_revision_args, cwd=cwd).strip().decode('utf-8')
- return {key: '{}@{}'.format(remote, revision)}
-
-
-def GetRevinfo():
- """Get absolute state of all git repos."""
- # First make sure we can add the cobalt_src repo.
- try:
- repos = CheckRevInfo('.', cwd=paths.REPOSITORY_ROOT)
- except subprocess.CalledProcessError as e:
- logging.warning('Failed to get revision information: %s', e)
- return {}
-
- for rel_path in _SUBREPO_PATHS:
- path = os.path.join(paths.REPOSITORY_ROOT, rel_path)
- try:
- repos.update(CheckRevInfo(rel_path, cwd=path))
- except subprocess.CalledProcessError as e:
- logging.warning('Failed to get revision information for subrepo %s: %s',
- rel_path, e)
- continue
- except OSError as e:
- logging.info('%s. Subrepository %s not found.', e, rel_path)
- continue
-
- return repos
-
-
-# We leave this function in for backwards compatibility.
-# New callers should use GetOrGenerateNewBuildNumber.
-def GetBuildNumber(version_server=_VERSION_SERVER_URL):
- return GetOrGenerateNewBuildNumber(version_server)
-
-
-def GetOrGenerateNewBuildNumber(version_server=_VERSION_SERVER_URL):
- """Send a request to the build version server for a build number."""
-
- if os.path.isfile(BUILD_ID_PATH):
- with open(BUILD_ID_PATH, 'r') as build_id_file:
- build_number = int(build_id_file.read().replace('\n', ''))
- logging.info('Retrieving build number from %s', BUILD_ID_PATH)
- return build_number
-
- revinfo = GetRevinfo()
- json_deps = json.dumps(revinfo)
- username = os.environ.get('USERNAME', os.environ.get('USER'))
-
- post_data = {'deps': json_deps}
- if username:
- post_data['user'] = username
-
- logging.debug('Post data is %s', post_data)
- request = urllib.request.Request(version_server)
- # TODO: retry on timeout.
- try:
- response = urllib.request.urlopen( # pylint: disable=consider-using-with
- request,
- data=urllib.parse.urlencode(post_data).encode('utf-8'))
- data = response.read().decode('utf-8')
- if data.find(_XSSI_PREFIX) == 0:
- data = data[len(_XSSI_PREFIX):]
- results = json.loads(data)
- build_number = results.get('build_number', 0)
- return build_number
- except urllib.error.HTTPError as e:
- logging.warning('Failed to retrieve build number: %s', e)
- return 0
- except urllib.error.URLError as e:
- logging.warning('Could not connect to %s: %s', version_server, e)
- return 0
diff --git a/cobalt/build/cobalt_configuration.py b/cobalt/build/cobalt_configuration.py
index a8c09b1..adaa361 100644
--- a/cobalt/build/cobalt_configuration.py
+++ b/cobalt/build/cobalt_configuration.py
@@ -13,12 +13,7 @@
# limitations under the License.
"""Base cobalt configuration for GYP."""
-import os
-
-from cobalt.tools import paths
-import cobalt.tools.webdriver_benchmark_config as wb_config
from starboard.build import application_configuration
-from starboard.tools.config import Config
# The canonical Cobalt application name.
APPLICATION_NAME = 'cobalt'
@@ -30,40 +25,6 @@
Cobalt per-platform configurations, if defined, must subclass from this class.
"""
- def GetVariables(self, config_name):
-
- # Use env var to optimize build speed on CI
- try:
- # Force to int, so it's easy to pass in an override.
- use_fastbuild = int(os.environ.get('IS_CI', 0))
- except (ValueError, TypeError):
- use_fastbuild = 0
-
- try:
- build_in_docker = int(os.environ.get('IS_DOCKER', 0))
- except (ValueError, TypeError):
- build_in_docker = 0
-
- variables = {
- # This is used to omit large debuginfo in files on CI environment
- 'cobalt_fastbuild': use_fastbuild,
- 'cobalt_docker_build': build_in_docker,
-
- # This is here rather than cobalt_configuration.gypi so that it's
- # available for browser_bindings_gen.gyp.
- 'enable_debugger': 0 if config_name == Config.GOLD else 1,
-
- # Cobalt uses OpenSSL on all platforms.
- 'use_openssl': 1,
- }
- return variables
-
- def GetPostIncludes(self):
- # Insert cobalt_configuration.gypi into the post includes list.
- includes = super(CobaltConfiguration, self).GetPostIncludes()
- includes[:0] = [os.path.join(paths.BUILD_ROOT, 'cobalt_configuration.gypi')]
- return includes
-
def GetWebPlatformTestFilters(self):
"""Gets all tests to be excluded from a black box test run."""
@@ -175,6 +136,7 @@
'poem_unittests',
'render_tree_test',
'renderer_test',
+ 'scroll_engine_tests',
'storage_test',
'text_encoding_test',
'web_test',
@@ -185,25 +147,3 @@
'xhr_test',
'zip_unittests',
]
-
- def GetDefaultTargetBuildFile(self):
- return os.path.join(paths.BUILD_ROOT, 'all.gyp')
-
- def WebdriverBenchmarksEnabled(self):
- """Determines if webdriver benchmarks are enabled or not.
-
- Returns:
- True if webdriver benchmarks can run on this platform, False if not.
- """
- return False
-
- def GetDefaultSampleSize(self):
- return wb_config.STANDARD_SIZE
-
- def GetWebdriverBenchmarksTargetParams(self):
- """Gets command line params to pass to the Cobalt executable."""
- return []
-
- def GetWebdriverBenchmarksParams(self):
- """Gets command line params to pass to the webdriver benchmark script."""
- return []
diff --git a/cobalt/build/generate_data_header.py b/cobalt/build/generate_data_header.py
index 04fe02e..10dad4e 100755
--- a/cobalt/build/generate_data_header.py
+++ b/cobalt/build/generate_data_header.py
@@ -62,7 +62,7 @@
return ord(x) if is_py2 else x
output_string = ',\n'.join([
- ', '.join(['0x%02x' % GetByte(y)
+ ', '.join([f'0x{GetByte(y):02x}'
for y in x])
for x in Chunks(file_contents, 13)
])
@@ -95,7 +95,7 @@
elif os.path.isfile(path):
file_list.append(InputFile(path, os.path.dirname(path)))
else:
- raise ValueError('%s is not a file or directory.' % path)
+ raise ValueError(f'{path} is not a file or directory.')
return file_list
@@ -104,7 +104,7 @@
for input_file in input_files:
input_file_variable_name = input_file.GetVariableName()
- output_file.write('const unsigned char %s[] =\n' % input_file_variable_name)
+ output_file.write(f'const unsigned char {input_file_variable_name}[] =\n')
WriteFileDataToHeader(input_file.path, output_file)
@@ -127,16 +127,16 @@
for input_file in input_files:
# The lookup key will be the file path relative to the input directory
input_file_variable_name = input_file.GetVariableName()
- output_file.write(' out_map["%s"] = FileContents(%s, sizeof(%s));\n' %
- (input_file.GetRelativePath(), input_file_variable_name,
- input_file_variable_name))
+ output_file.write(f' out_map["{input_file.GetRelativePath()}"] = '
+ f'FileContents({input_file_variable_name}, '
+ f'sizeof({input_file_variable_name}));\n')
output_file.write('}\n\n')
def WriteHeader(namespace, output_file_name, files_to_concatenate):
"""Writes an embedded resource header to the given output filename."""
- with open(output_file_name, 'w') as output_file:
+ with open(output_file_name, 'w', encoding='utf-8') as output_file:
include_guard = '_COBALT_GENERATED_' + namespace.upper() + '_H_'
output_file.write('// Copyright 2014 The Cobalt Authors. '
'All Rights Reserved.\n'
@@ -159,7 +159,7 @@
if __name__ == '__main__':
if len(sys.argv) < 4:
- print('usage:\n %s <namespace> <output-file> <inputs...> \n' % sys.argv[0])
+ print(f'usage:\n {sys.argv[0]} <namespace> <output-file> <inputs...> \n')
print(__doc__)
sys.exit(1)
main(sys.argv[1], sys.argv[2], sys.argv[3:])
diff --git a/cobalt/build/get_build_id.py b/cobalt/build/get_build_id.py
index 14f4f0d..5e5c717 100755
--- a/cobalt/build/get_build_id.py
+++ b/cobalt/build/get_build_id.py
@@ -15,21 +15,49 @@
"""Prints out the Cobalt Build ID."""
import os
-from cobalt.build.build_number import GetOrGenerateNewBuildNumber
+import re
+import subprocess
+
+_FILE_DIR = os.path.dirname(__file__)
+COMMIT_COUNT_BUILD_NUMBER_OFFSET = 1000000
+
+# Matches numbers > 1000000. The pattern is basic so git log --grep is able to
+# interpret it.
+GIT_BUILD_NUMBER_PATTERN = r'[1-9]' + r'[0-9]' * 6 + r'[0-9]*'
+BUILD_NUMBER_TAG_PATTERN = r'^BUILD_NUMBER={}$'
+
+# git log --grep can't handle capture groups.
+BUILD_NUMBER_PATTERN_WITH_CAPTURE = f'({GIT_BUILD_NUMBER_PATTERN})'
-def main():
- # Note $BUILD_ID_SERVER_URL will always be set in CI.
- build_id_server_url = os.environ.get('BUILD_ID_SERVER_URL')
- if build_id_server_url:
- build_num = GetOrGenerateNewBuildNumber(version_server=build_id_server_url)
- if build_num == 0:
- raise ValueError('The build number received was zero.')
- print(build_num)
- else:
- # No need to generate a build id for local builds.
- print('0')
+def get_build_number_from_commits(cwd=_FILE_DIR):
+ full_pattern = BUILD_NUMBER_TAG_PATTERN.format(GIT_BUILD_NUMBER_PATTERN)
+ output = subprocess.check_output(
+ ['git', 'log', '--grep', full_pattern, '-1', '--pretty=%b'],
+ cwd=cwd).decode()
+
+ full_pattern_with_capture = re.compile(
+ BUILD_NUMBER_TAG_PATTERN.format(BUILD_NUMBER_PATTERN_WITH_CAPTURE),
+ flags=re.MULTILINE)
+ match = full_pattern_with_capture.search(output)
+ return match.group(1) if match else None
+
+
+def get_build_number_from_commit_count(cwd=_FILE_DIR):
+ output = subprocess.check_output(['git', 'rev-list', '--count', 'HEAD'],
+ cwd=cwd)
+ build_number = int(output.strip().decode('utf-8'))
+ return build_number + COMMIT_COUNT_BUILD_NUMBER_OFFSET
+
+
+def main(cwd=_FILE_DIR):
+ build_number = get_build_number_from_commits(cwd=cwd)
+
+ if not build_number:
+ build_number = get_build_number_from_commit_count(cwd=cwd)
+
+ return build_number
if __name__ == '__main__':
- main()
+ print(main())
diff --git a/cobalt/build/get_build_id_test.py b/cobalt/build/get_build_id_test.py
new file mode 100644
index 0000000..f9ea8c3
--- /dev/null
+++ b/cobalt/build/get_build_id_test.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+#
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Tests the get_build_id module."""
+
+import os
+import subprocess
+import tempfile
+import unittest
+
+from cobalt.build import get_build_id
+
+_TEST_BUILD_NUMBER = 1234 + get_build_id.COMMIT_COUNT_BUILD_NUMBER_OFFSET
+
+
+# TODO(b/282040638): fix and re-enable this
+@unittest.skipIf(os.name == 'nt', 'Broken on Windows')
+class GetBuildIdTest(unittest.TestCase):
+
+ def setUp(self):
+ self.test_dir = tempfile.TemporaryDirectory() # pylint: disable=consider-using-with
+ self.original_cwd = os.getcwd()
+ os.chdir(self.test_dir.name)
+ subprocess.check_call(['git', 'init'])
+ subprocess.check_call(['git', 'config', 'user.name', 'pytest'])
+ subprocess.check_call(['git', 'config', 'user.email', 'pytest@pytest.com'])
+
+ def tearDown(self):
+ os.chdir(self.original_cwd)
+ self.test_dir.cleanup()
+
+ def make_commit(self, message='Temporary commit'):
+ with tempfile.NamedTemporaryFile('w', dir=self.test_dir.name) as temp_file:
+ subprocess.check_call(['git', 'add', temp_file.name])
+ subprocess.check_call(['git', 'commit', '-m', message])
+
+ def make_commit_with_build_number(self, build_number=_TEST_BUILD_NUMBER):
+ message = f'Subject line\n\nBUILD_NUMBER={build_number}'
+ self.make_commit(message)
+
+ def testSanity(self):
+ self.make_commit()
+ head_rev = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
+ self.assertNotEqual(head_rev.strip().decode('utf-8'), '')
+
+ def testGetBuildNumberFromCommitsSunnyDay(self):
+ self.make_commit_with_build_number()
+ build_number = get_build_id.get_build_number_from_commits(
+ cwd=self.test_dir.name)
+ self.assertEqual(int(build_number), _TEST_BUILD_NUMBER)
+
+ def testGetBuildNumberFromCommitsSunnyDayGetMostRecent(self):
+ num_commits = 5
+ for i in range(num_commits):
+ self.make_commit_with_build_number(
+ get_build_id.COMMIT_COUNT_BUILD_NUMBER_OFFSET + i)
+ build_number = get_build_id.get_build_number_from_commits(
+ cwd=self.test_dir.name)
+ self.assertEqual(
+ int(build_number),
+ num_commits + get_build_id.COMMIT_COUNT_BUILD_NUMBER_OFFSET - 1)
+
+ def testGetBuildNumberFromCommitsRainyDayInvalidBuildNumber(self):
+ self.make_commit()
+ self.make_commit(f'BUILD_NUMBER={_TEST_BUILD_NUMBER}')
+ build_number = get_build_id.get_build_number_from_commits(
+ cwd=self.test_dir.name)
+ self.assertIsNone(build_number)
+
+ def testGetBuildNumberFromCommitCountSunnyDay(self):
+ num_commits = 5
+ for _ in range(num_commits):
+ self.make_commit()
+ build_number = get_build_id.get_build_number_from_commit_count(
+ cwd=self.test_dir.name)
+ self.assertEqual(
+ build_number,
+ num_commits + get_build_id.COMMIT_COUNT_BUILD_NUMBER_OFFSET)
+
+ def testCommitsOutrankCommitCount(self):
+ self.make_commit()
+ self.make_commit_with_build_number()
+ self.make_commit()
+ build_number = get_build_id.main(cwd=self.test_dir.name)
+ self.assertEqual(int(build_number), _TEST_BUILD_NUMBER)
+
+ def testFallbackToCommitCount(self):
+ num_commits = 5
+ for _ in range(num_commits):
+ self.make_commit()
+ build_number = get_build_id.main(cwd=self.test_dir.name)
+ self.assertEqual(
+ build_number,
+ num_commits + get_build_id.COMMIT_COUNT_BUILD_NUMBER_OFFSET)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/cobalt/build/gn.py b/cobalt/build/gn.py
index 973ec4c..7ff3bc4 100755
--- a/cobalt/build/gn.py
+++ b/cobalt/build/gn.py
@@ -37,15 +37,14 @@
if overwrite_args or not os.path.exists(dst_args_gn_file):
shutil.copy(src_args_gn_file, dst_args_gn_file)
- with open(dst_args_gn_file, 'a') as f:
+ with open(dst_args_gn_file, 'a', encoding='utf-8') as f:
f.write(f'build_type = "{build_type}"\n')
else:
print(f'{dst_args_gn_file} already exists.' +
' Running ninja will regenerate build files automatically.')
gn_command = [
- 'gn', '--script-executable={}'.format(sys.executable), 'gen',
- out_directory
+ 'gn', f'--script-executable={sys.executable}', 'gen', out_directory
] + gn_gen_args
print(' '.join(gn_command))
subprocess.check_call(gn_command)
diff --git a/cobalt/build/save_build_id.py b/cobalt/build/save_build_id.py
index cdd0d01..a6f3b5d 100755
--- a/cobalt/build/save_build_id.py
+++ b/cobalt/build/save_build_id.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2016 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,10 +20,11 @@
import sys
import textwrap
-from cobalt.build import build_number
+from cobalt.build import get_build_id
+from cobalt.tools import paths
+_BUILD_ID_PATH = os.path.join(paths.BUILD_ROOT, 'build.id')
_SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
-_BUILD_ID_PATH = build_number.BUILD_ID_PATH
# Return values used by main().
RETVAL_SUCCESS = 0
@@ -63,20 +64,15 @@
return 0
if not options.build_id:
- build_id_server_url = os.environ.get('BUILD_ID_SERVER_URL')
- if build_id_server_url:
- options.build_id = build_number.GetOrGenerateNewBuildNumber(
- version_server=build_id_server_url)
- else:
- options.build_id = build_number.GetOrGenerateNewBuildNumber()
+ options.build_id = get_build_id.main()
if not options.build_id:
logging.error('Unable to retrieve build id.')
return RETVAL_ERROR
try:
- with open(_BUILD_ID_PATH, 'w') as build_id_file:
- build_id_file.write('{0}'.format(options.build_id))
+ with open(_BUILD_ID_PATH, 'w', encoding='utf-8') as build_id_file:
+ build_id_file.write(f'{options.build_id}')
except RuntimeError as e:
logging.error(e)
return RETVAL_ERROR
diff --git a/cobalt/build/sync_to_build_id.py b/cobalt/build/sync_to_build_id.py
deleted file mode 100755
index d96e4d0..0000000
--- a/cobalt/build/sync_to_build_id.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/python2
-"""Syncs to a given Cobalt build id.
-
-Syncs current gclient instance to a given build id, as
-generated by "build_id.py" and stored on carbon-airlock-95823.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import json
-import os
-import shutil
-import subprocess
-import sys
-import requests
-
-_BUILD_ID_QUERY_URL = (
- "https://carbon-airlock-95823.appspot.com/build_version/search")
-_BUILD_ID_QUERY_PARAMETER_NAME = "build_number"
-
-
-class SubprocessFailedException(Exception):
- """Exception for non-zero subprocess exits."""
-
- def __init__(self, command):
- super(SubprocessFailedException, self).__init__() # pylint: disable=super-with-arguments
- self.command = command
-
- def __str__(self):
- return "Subprocess failed '{0}'".format(self.command)
-
-
-def _RunGitCommand(gitargs, **kwargs):
- """Runs a git command with "gitargs", returning the output splitlines().
-
- Args:
- gitargs: Commandline args that follow 'git'.
- **kwargs: Keyword args for Popen.
-
- Returns:
- All of stdout, as an array of lines.
-
- Raises:
- SubprocessFailedException: if the exit code is nonzero.
-
- """
- result_tuple = _RunGitCommandReturnExitCode(gitargs, **kwargs)
- if result_tuple[0] != 0:
- raise SubprocessFailedException(" ".join(["git"] + gitargs))
- return result_tuple[1]
-
-
-def _RunGitCommandReturnExitCode(gitargs, **kwargs):
- """Runs a git command with "gitargs", returning the exit code and output.
-
- Args:
- gitargs: Commandline args that follow 'git'.
- **kwargs: Keyword args for Popen.
-
- Returns:
- Tuple of (exit code, all of stdout as an array of lines).
-
- """
- popen_args = ["git"] + gitargs
- with subprocess.Popen(popen_args, stdout=subprocess.PIPE, **kwargs) as p:
- output = p.stdout.read().splitlines()
- return p.wait(), output
-
-
-def main():
- dev_null = open(os.devnull, "w") # pylint: disable=consider-using-with
- arg_parser = argparse.ArgumentParser(
- description="Syncs to a given Cobalt build id")
- arg_parser.add_argument("buildid", nargs=1)
- arg_parser.add_argument(
- "--force",
- default=False,
- action="store_true",
- help="Deletes directories that don't match the requested format.")
- args = arg_parser.parse_args()
- r = requests.get(
- _BUILD_ID_QUERY_URL,
- params={_BUILD_ID_QUERY_PARAMETER_NAME: args.buildid[0]})
- if not r.ok:
- print(
- "HTTP request failed\n{0} {1}\n{2}".format(r.status_code, r.reason,
- r.text),
- file=sys.stderr)
- return 1
- # The response starts with a security-related close expression line
- outer_json = json.loads(r.text.splitlines()[1])
- hashes = json.loads(outer_json["deps"])
- git_root = os.getcwd()
-
- for relpath, rep_hash in hashes.items():
- path = os.path.normpath(os.path.join(git_root, relpath))
- if not os.path.exists(path):
- # No warning in this case, we will attempt to clone the repository in
- # the next pass through the repos.
- continue
- is_dirty = (
- bool(
- _RunGitCommandReturnExitCode(["diff", "--no-ext-diff", "--quiet"],
- cwd=path,
- stderr=dev_null)[0]) or
- bool(
- _RunGitCommandReturnExitCode(
- ["diff", "--no-ext-diff", "--quiet", "--cached"],
- cwd=path,
- stderr=dev_null)[0]))
-
- if is_dirty:
- print("{0} is dirty, please resolve".format(relpath))
- return 1
-
- (requested_repo, _) = rep_hash.split("@")
- remote_url = _RunGitCommand(["config", "--get", "remote.origin.url"],
- cwd=path)[0].strip().decode("utf-8")
- if requested_repo.endswith(".git"):
- if remote_url + ".git" == requested_repo:
- print(("WARNING: You are syncing to {0} instead of {1}. While these "
- "point to the same repo, the differing extension will cause "
- "different build ids to be generated. If you need the same "
- "id, you'll need to specifically clone {0} (note the .git "
- "extension).").format(requested_repo, remote_url))
- remote_url += ".git"
-
- if remote_url != requested_repo:
- if args.force and path != git_root:
- shutil.rmtree(path)
- else:
- print(("{0} exists but does not point to the requested repo for that "
- "path, {1}. Either replace that directory manually or run this "
- "script with --force. --force will not try to remove the top "
- "level repository.").format(path, requested_repo))
- return 1
-
- for relpath, rep_hash in hashes.items():
- path = os.path.normpath(os.path.join(git_root, relpath))
-
- # repo_hash has a repo path prefix like this:
- # 'https://chromium.googlesource.com/chromium/llvm-project/libcxx.git
- # @48198f9110397fff47fe7c37cbfa296be7d44d3d'
- (requested_repo, requested_hash) = rep_hash.split("@")
-
- if not os.path.exists(path):
- print("Missing path {0}, cloning from {1}.".format(path, requested_repo))
- try:
- # The clone command will create all missing directories leading to the
- # path. If the clone is successful, we continue on as usual and let
- # the subsequent logic here checkout the appropriate git hash.
- _RunGitCommand(["clone", "-q", requested_repo, path])
- except SubprocessFailedException:
- print("There was an error cloning the repository.")
- continue
-
- current_hash = _RunGitCommand(["rev-parse", "HEAD"], cwd=path)[0]
-
- if requested_hash == current_hash:
- continue
- symbolic_ref = None
- try:
- symbolic_ref = _RunGitCommand(["symbolic-ref", "--short", "-q", "HEAD"],
- cwd=path,
- stderr=dev_null)[0]
- except SubprocessFailedException:
- pass
-
- user_visible_commit = symbolic_ref if symbolic_ref else current_hash[0:7]
-
- print("{0} was at {1} now {2}".format(path, user_visible_commit,
- requested_hash[0:7]))
-
- _RunGitCommand(["checkout", "-q", "--detach", requested_hash], cwd=path)
-
- return 0
-
-
-if __name__ == "__main__":
- try:
- sys.exit(main())
- except SubprocessFailedException as ex:
- print(str(ex), file=sys.stderr)
- sys.exit(1)
diff --git a/cobalt/codereview.settings b/cobalt/codereview.settings
new file mode 100644
index 0000000..a4b341a
--- /dev/null
+++ b/cobalt/codereview.settings
@@ -0,0 +1,4 @@
+# This file is used by gcl to get repository specific information.
+GERRIT_HOST: lbshell-internal-review.googlesource.com
+GERRIT_AUTODETECT_BRANCH: true
+CODE_REVIEW_SERVER: lbshell-internal-review.googlesource.com
diff --git a/cobalt/content/fonts/config/common/fonts.xml b/cobalt/content/fonts/config/common/fonts.xml
index abba022..2ddeaec 100644
--- a/cobalt/content/fonts/config/common/fonts.xml
+++ b/cobalt/content/fonts/config/common/fonts.xml
@@ -184,7 +184,7 @@
<font weight="400" style="normal">NotoSansArmenian-Regular.woff2</font>
<font weight="700" style="normal">NotoSansArmenian-Bold.woff2</font>
</family>
- <family lang="und-Geor,und-Geok" pages="0,5,16,45,254">
+ <family lang="und-Geor,und-Geok" pages="0,5,16,28,45,254">
<font weight="400" style="normal">NotoSansGeorgian-Regular.woff2</font>
<font weight="700" style="normal">NotoSansGeorgian-Bold.woff2</font>
</family>
diff --git a/cobalt/content/fonts/font_files/NotoSansGeorgian-Bold.woff2 b/cobalt/content/fonts/font_files/NotoSansGeorgian-Bold.woff2
index a7f9336..03a41c8 100644
--- a/cobalt/content/fonts/font_files/NotoSansGeorgian-Bold.woff2
+++ b/cobalt/content/fonts/font_files/NotoSansGeorgian-Bold.woff2
Binary files differ
diff --git a/cobalt/content/fonts/font_files/NotoSansGeorgian-Regular.woff2 b/cobalt/content/fonts/font_files/NotoSansGeorgian-Regular.woff2
index fb2fe49..68e26b4 100644
--- a/cobalt/content/fonts/font_files/NotoSansGeorgian-Regular.woff2
+++ b/cobalt/content/fonts/font_files/NotoSansGeorgian-Regular.woff2
Binary files differ
diff --git a/cobalt/content/fonts/scripts/filter_fonts.py b/cobalt/content/fonts/scripts/filter_fonts.py
index 74486da..4eca57e 100755
--- a/cobalt/content/fonts/scripts/filter_fonts.py
+++ b/cobalt/content/fonts/scripts/filter_fonts.py
@@ -80,9 +80,8 @@
elif category == '4':
return True
else:
- raise ValueError(
- 'Package category for "%s" must be between 0 and 4 (is: %s)' %
- (package_name, category))
+ raise ValueError(f'Package category for "{package_name}" must be between '
+ f'0 and 4 (is: {category})')
def FilterFonts(filter_function, node, families, fonts):
@@ -190,7 +189,7 @@
FilterFonts(filter_function, fonts_doc, kept_families, kept_fonts)
if options.output_xml:
- with open(options.output_xml, 'w') as f:
+ with open(options.output_xml, 'w', encoding='utf-8') as f:
f.write(fonts_doc.toprettyxml(indent=' '))
if options.fonts_dir:
diff --git a/cobalt/css_parser/BUILD.gn b/cobalt/css_parser/BUILD.gn
index 93c7458..d6a668c 100644
--- a/cobalt/css_parser/BUILD.gn
+++ b/cobalt/css_parser/BUILD.gn
@@ -97,6 +97,11 @@
"//nb",
"//starboard/common",
]
+
+ if (is_clang_16) {
+ # grammar_impl_generated.h variable 'yynerrs' set but not used
+ cflags_cc = [ "-Wno-unused-but-set-variable" ]
+ }
}
target(gtest_target_type, "css_parser_test") {
diff --git a/cobalt/debug/backend/debug_dispatcher.cc b/cobalt/debug/backend/debug_dispatcher.cc
index e6643c6..0e7a137 100644
--- a/cobalt/debug/backend/debug_dispatcher.cc
+++ b/cobalt/debug/backend/debug_dispatcher.cc
@@ -15,10 +15,12 @@
#include "cobalt/debug/backend/debug_dispatcher.h"
#include <string>
+#include <utility>
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/values.h"
#include "cobalt/debug/debug_client.h"
@@ -30,7 +32,7 @@
DebugScriptRunner* script_runner)
: script_debugger_(script_debugger),
script_runner_(script_runner),
- task_runner_(base::MessageLoop::current()->task_runner()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
is_paused_(false),
// No manual reset, not initially signaled.
command_added_while_paused_(
diff --git a/cobalt/debug/command.h b/cobalt/debug/command.h
index 566d58a..fd63ddb 100644
--- a/cobalt/debug/command.h
+++ b/cobalt/debug/command.h
@@ -18,6 +18,7 @@
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/debug/debug_client.h"
#include "cobalt/debug/json_object.h"
@@ -49,7 +50,7 @@
domain_(method_, 0, method_.find('.')),
json_params_(json_params),
callback_(response_callback),
- task_runner_(base::MessageLoop::current()->task_runner()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
response_sent_(false) {
DCHECK(!method_.empty());
DCHECK(!domain_.empty());
diff --git a/cobalt/debug/console/debug_hub.cc b/cobalt/debug/console/debug_hub.cc
index 41f3ce1..f87f175 100644
--- a/cobalt/debug/console/debug_hub.cc
+++ b/cobalt/debug/console/debug_hub.cc
@@ -16,11 +16,13 @@
#include <memory>
#include <set>
+#include <utility>
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/json/json_writer.h"
#include "base/path_service.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/values.h"
#include "cobalt/base/c_val.h"
#include "cobalt/base/cobalt_paths.h"
@@ -164,8 +166,7 @@
void DebugHub::RunResponseCallback(
const scoped_refptr<ResponseCallbackInfo>& callback_info,
base::Optional<std::string> response) const {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(),
- callback_info->task_runner);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), callback_info->task_runner);
callback_info->callback.value().Run(std::move(response));
}
diff --git a/cobalt/debug/console/debug_hub.h b/cobalt/debug/console/debug_hub.h
index da32546..ef5da93 100644
--- a/cobalt/debug/console/debug_hub.h
+++ b/cobalt/debug/console/debug_hub.h
@@ -15,12 +15,14 @@
#ifndef COBALT_DEBUG_CONSOLE_DEBUG_HUB_H_
#define COBALT_DEBUG_CONSOLE_DEBUG_HUB_H_
+#include <memory>
#include <string>
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/optional.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/debug/console/console_command.h"
#include "cobalt/debug/console/debug_console_mode.h"
#include "cobalt/debug/console/debugger_event_target.h"
@@ -63,7 +65,7 @@
ResponseCallbackInfo(DebugHub* const debugger,
const ResponseCallbackArg& cb)
: callback(debugger, cb),
- task_runner(base::MessageLoop::current()->task_runner()) {}
+ task_runner(base::ThreadTaskRunnerHandle::Get()) {}
ResponseCallbackArg::Reference callback;
scoped_refptr<base::SingleThreadTaskRunner> task_runner;
friend class base::RefCountedThreadSafe<ResponseCallbackInfo>;
@@ -97,7 +99,7 @@
const script::Sequence<ConsoleCommand> console_commands() const;
// Sends a console command to be handled in the context of the debug WebModule
- // by a registered hander. This lets the JavaScript debug console trigger
+ // by a registered handler. This lets the JavaScript debug console trigger
// actions in the app.
void SendConsoleCommand(const std::string& command,
const std::string& message);
diff --git a/cobalt/debug/console/debugger_event_target.cc b/cobalt/debug/console/debugger_event_target.cc
index 3b65b28..831b357 100644
--- a/cobalt/debug/console/debugger_event_target.cc
+++ b/cobalt/debug/console/debugger_event_target.cc
@@ -14,6 +14,7 @@
#include "cobalt/debug/console/debugger_event_target.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/source_location.h"
namespace cobalt {
@@ -48,14 +49,14 @@
void DebuggerEventTarget::AddListener(
const DebuggerEventTarget::DebuggerEventCallbackArg& callback) {
base::AutoLock auto_lock(lock_);
- listeners_.insert(new ListenerInfo(
- this, callback, base::MessageLoop::current()->task_runner()));
+ listeners_.insert(
+ new ListenerInfo(this, callback, base::ThreadTaskRunnerHandle::Get()));
}
void DebuggerEventTarget::NotifyListener(
const DebuggerEventTarget::ListenerInfo* listener,
const std::string& method, const base::Optional<std::string>& json_params) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), listener->task_runner);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), listener->task_runner);
listener->callback.value().Run(method, json_params);
}
diff --git a/cobalt/demos/content/BUILD.gn b/cobalt/demos/content/BUILD.gn
index e9f5d09..1d6ec9a 100644
--- a/cobalt/demos/content/BUILD.gn
+++ b/cobalt/demos/content/BUILD.gn
@@ -71,8 +71,6 @@
"media-element-demo/tsconfig.json",
"media-element-demo/webpack.config.js",
"media-query/media-query-test.html",
- "mtm-demo/mtm.html",
- "mtm-demo/normal.html",
"opacity-transitions-demo/opacity-transitions-demo.html",
"page-visibility-demo/page-visibility-demo.html",
"performance-api-demo/performance-lifecycle-timing-demo.html",
@@ -98,6 +96,7 @@
"splash_screen/redirected.html",
"splash_screen/render_postponed.html",
"system-caption-settings/index.html",
+ "text-encoding-workaround/text-encoding-workaround.html",
"timer-demo/timer-demo.html",
"transitions-demo/transitions-demo.html",
"transparent-animated-webp-demo/bottleflip_loader.webp",
@@ -111,6 +110,10 @@
if (is_internal_build) {
sources += [
+ "//internal/cobalt/demos/content/mtm-demo/README.txt",
+ "//internal/cobalt/demos/content/mtm-demo/mtm.html",
+ "//internal/cobalt/demos/content/mtm-demo/normal.html",
+ "//internal/cobalt/demos/content/mtm-demo/progressive.mp4",
"media-element-demo/public/assets/ac3.mp4",
"media-element-demo/public/assets/dash-audio.mp4",
"media-element-demo/public/assets/dash-video-1080p.mp4",
@@ -121,9 +124,6 @@
"media-element-demo/public/assets/hvc1_720p.mp4",
"media-element-demo/public/assets/hvc1_hdr_480p.mp4",
"media-element-demo/public/assets/progressive.mp4",
- "mtm-demo/README.txt",
- "mtm-demo/progressive.mp4",
- "text-encoding-workaround/text-encoding-workaround.html",
]
}
diff --git a/cobalt/demos/content/media-element-demo/package.json b/cobalt/demos/content/media-element-demo/package.json
index bed93af..d6e26e2 100644
--- a/cobalt/demos/content/media-element-demo/package.json
+++ b/cobalt/demos/content/media-element-demo/package.json
@@ -5,7 +5,7 @@
"scripts": {
"start": "npm run watch & npm run server",
"build": "webpack",
- "server": "http-server dist",
+ "server": "http-server -c-1 dist",
"watch": "webpack --watch"
},
"author": "",
diff --git a/cobalt/demos/content/media-element-demo/src/components/player.ts b/cobalt/demos/content/media-element-demo/src/components/player.ts
index 7bd9c17..60e3a5c 100644
--- a/cobalt/demos/content/media-element-demo/src/components/player.ts
+++ b/cobalt/demos/content/media-element-demo/src/components/player.ts
@@ -25,6 +25,18 @@
/** The <video> element. */
private videoEl!: HTMLVideoElement;
+ /** The video SourceBuffer */
+ private videoSourceBuffer!: SourceBuffer;
+
+ /** The audio SourceBuffer */
+ private audioSourceBuffer!: SourceBuffer;
+
+ /** max(videoSourceBuffer.writeHead - videoEl.currentTime) */
+ private maxVideoWriteHeadDistance!: number;
+
+ /** max(audioSourceBuffer.writeHead - videoEl.currentTime) */
+ private maxAudioWriteHeadDistance!: number;
+
/** The element displaying video download buffer info. */
private videoDownloadBufferInfo!: Element;
@@ -53,6 +65,8 @@
super(props);
this.videos = convertToMediaArray(props.video);
this.audios = convertToMediaArray(props.audio);
+ this.maxVideoWriteHeadDistance = 0;
+ this.maxAudioWriteHeadDistance = 0;
}
/** @override */
@@ -93,12 +107,37 @@
}
private renderVideoInfo() {
+ var h5vccAudioConnectors = '';
+ try {
+ h5vccAudioConnectors = this.videoEl.h5vccAudioConnectors;
+ } catch (error) {}
renderComponent(
VideoInfo, {
duration: this.videoEl.duration,
currentTime: this.videoEl.currentTime,
+ audioConnectors: h5vccAudioConnectors,
},
this.videoInfo);
+ if (this.videoSourceBuffer) {
+ this.maxVideoWriteHeadDistance =
+ Math.max(this.maxVideoWriteHeadDistance,
+ this.videoSourceBuffer.writeHead - this.videoEl.currentTime);
+ renderComponent(
+ SourceBufferInfo,
+ {name: 'Video', sourceBuffer: this.videoSourceBuffer,
+ maxWriteHeadDistance: this.maxVideoWriteHeadDistance},
+ this.videoSourceBufferInfo);
+ }
+ if (this.audioSourceBuffer) {
+ this.maxAudioWriteHeadDistance =
+ Math.max(this.maxAudioWriteHeadDistance,
+ this.audioSourceBuffer.writeHead - this.videoEl.currentTime);
+ renderComponent(
+ SourceBufferInfo,
+ {name: 'Audio', sourceBuffer: this.audioSourceBuffer,
+ maxWriteHeadDistance: this.maxAudioWriteHeadDistance},
+ this.audioSourceBufferInfo);
+ }
}
private async play() {
@@ -149,7 +188,7 @@
/**
* Plays all videos as adaptive videos.
- * TODO: dynmaically calculate the source buffer MIME.
+ * TODO: dynamically calculate the source buffer MIME.
*/
private playAdaptiveVideo() {
const ms = new MediaSource();
@@ -158,12 +197,7 @@
if (this.videos.length > 0) {
const videoSourceBuffer =
ms.addSourceBuffer('video/mp4; codecs="avc1.640028"');
- videoSourceBuffer.addEventListener('updateend', () => {
- renderComponent(
- SourceBufferInfo,
- {name: 'Video', sourceBuffer: videoSourceBuffer},
- this.videoSourceBufferInfo);
- });
+ this.videoSourceBuffer = videoSourceBuffer;
const downloadBuffer = new DownloadBuffer(this.videos);
downloadBuffer.register((reportMap) => {
renderComponent(
@@ -176,12 +210,7 @@
if (this.audios.length > 0) {
const audioSourceBuffer =
ms.addSourceBuffer('audio/mp4; codecs="mp4a.40.2"');
- audioSourceBuffer.addEventListener('updateend', () => {
- renderComponent(
- SourceBufferInfo,
- {name: 'Audio', sourceBuffer: audioSourceBuffer},
- this.audioSourceBufferInfo);
- });
+ this.audioSourceBuffer = audioSourceBuffer;
const downloadBuffer = new DownloadBuffer(this.audios);
downloadBuffer.register(
(reportMap) => {renderComponent(
diff --git a/cobalt/demos/content/media-element-demo/src/components/source_buffer_info.ts b/cobalt/demos/content/media-element-demo/src/components/source_buffer_info.ts
index 92343f3..6747f66 100644
--- a/cobalt/demos/content/media-element-demo/src/components/source_buffer_info.ts
+++ b/cobalt/demos/content/media-element-demo/src/components/source_buffer_info.ts
@@ -1,9 +1,12 @@
interface Props {
sourceBuffer: SourceBuffer;
name: string;
+ maxWriteHeadDistance: number;
}
/** A component that displays the source buffer info. */
-export function SourceBufferInfo({sourceBuffer, name}: Props) {
- return `<div>${name} buffered: ${sourceBuffer.buffered.end(0)} sec</div>`;
+export function SourceBufferInfo({sourceBuffer, name, maxWriteHeadDistance}: Props) {
+ return `<div>${name} buffered: ${sourceBuffer.buffered.end(0)} sec` +
+ `, writeHead: ${sourceBuffer.writeHead} sec` +
+ `, maxWriteHeadDistance: ${maxWriteHeadDistance} sec</div>`;
}
diff --git a/cobalt/demos/content/media-element-demo/src/components/video_info.ts b/cobalt/demos/content/media-element-demo/src/components/video_info.ts
index 3f8306f..68012c8 100644
--- a/cobalt/demos/content/media-element-demo/src/components/video_info.ts
+++ b/cobalt/demos/content/media-element-demo/src/components/video_info.ts
@@ -1,9 +1,13 @@
interface Props {
duration: number;
currentTime: number;
+ audioConnectors: string;
}
/** A component that displays video info. */
-export function VideoInfo({duration, currentTime}: Props) {
+export function VideoInfo({duration, currentTime, audioConnectors}: Props) {
+ if (audioConnectors) {
+ return `<div>${currentTime} / ${duration} / audioConnectors: ${audioConnectors}</div>`;
+ }
return `<div>${currentTime} / ${duration}</div>`;
}
diff --git a/cobalt/demos/content/mtm-demo/mtm.html b/cobalt/demos/content/mtm-demo/mtm.html
deleted file mode 100644
index eeee431..0000000
--- a/cobalt/demos/content/mtm-demo/mtm.html
+++ /dev/null
@@ -1,138 +0,0 @@
-<!DOCTYPE html>
-<html>
-
-<head>
- <title>Map-To-Mesh Demo</title>
-
- <style>
- #v {
- width: 100%;
- height: 100%;
- filter: map-to-mesh(equirectangular, 100deg 60deg,
- matrix3d(1, 0, 0, 0,
- 0, 1, 0, 0,
- 0, 0, 1, 0,
- 0, 0, 0, 1));
- }
-
- .instructions {
- position: absolute;
- left: 0;
- background-color: white;
- color: black;
- }
-
- @keyframes blink {
- from {background-color: white;}
- to {background-color: black;}
- }
-
- .spinner {
- position: absolute;
- left: 0;
- bottom: 0;
- animation: blink 1s infinite alternate;
- width: 20px;
- height: 20px;
- }
-
- #fps {
- position: absolute;
- left: 50px;
- bottom: 0;
- background-color: white;
- color: black;
- }
- </style>
-
- <script>
- var degreesPerSecond = 90;
- // The following mappings are done in this order:
- // Up, Down, Left, Right
-
- // Direction keys
- camera3D.createKeyMapping(38, camera3D.DOM_CAMERA_PITCH, degreesPerSecond);
- camera3D.createKeyMapping(40, camera3D.DOM_CAMERA_PITCH, -degreesPerSecond);
- camera3D.createKeyMapping(37, camera3D.DOM_CAMERA_YAW, degreesPerSecond);
- camera3D.createKeyMapping(39, camera3D.DOM_CAMERA_YAW, -degreesPerSecond);
-
- // DPAD
- camera3D.createKeyMapping(
- 0x800C, camera3D.DOM_CAMERA_PITCH, degreesPerSecond);
- camera3D.createKeyMapping(
- 0x800D, camera3D.DOM_CAMERA_PITCH, -degreesPerSecond);
- camera3D.createKeyMapping(
- 0x800E, camera3D.DOM_CAMERA_YAW, degreesPerSecond);
- camera3D.createKeyMapping(
- 0x800F, camera3D.DOM_CAMERA_YAW, -degreesPerSecond);
-
- // Left joystick
- camera3D.createKeyMapping(
- 0x8011, camera3D.DOM_CAMERA_PITCH, degreesPerSecond);
- camera3D.createKeyMapping(
- 0x8012, camera3D.DOM_CAMERA_PITCH, -degreesPerSecond);
- camera3D.createKeyMapping(
- 0x8013, camera3D.DOM_CAMERA_YAW, degreesPerSecond);
- camera3D.createKeyMapping(
- 0x8014, camera3D.DOM_CAMERA_YAW, -degreesPerSecond);
-
- // Right joystick
- camera3D.createKeyMapping(
- 0x8015, camera3D.DOM_CAMERA_PITCH, degreesPerSecond);
- camera3D.createKeyMapping(
- 0x8016, camera3D.DOM_CAMERA_PITCH, -degreesPerSecond);
- camera3D.createKeyMapping(
- 0x8017, camera3D.DOM_CAMERA_YAW, degreesPerSecond);
- camera3D.createKeyMapping(
- 0x8018, camera3D.DOM_CAMERA_YAW, -degreesPerSecond);
-
- // Update the frame rate counter at a regular interval.
- function UpdateFPS() {
- if ('h5vcc' in window && 'cVal' in window.h5vcc) {
- // Query Cobalt for the average amount of time between the start of
- // each frame. Translate that into a framerate and then update a
- // framerate counter on the window.
- var average_frame_time_in_us = window.h5vcc.cVal.getValue(
- 'Renderer.Rasterize.DurationInterval.Avg');
- if (!average_frame_time_in_us || average_frame_time_in_us <= 0) {
- // In older versions of Cobalt use a different name for the framerate
- // counter, so try falling back to that if the first fails.
- average_frame_time_in_us = window.h5vcc.cVal.getValue(
- 'Renderer.Rasterize.Duration.Avg');
- }
-
- if (average_frame_time_in_us && average_frame_time_in_us > 0) {
- // Convert frame time into frame rate (by taking the inverse).
- // We also multiply by 1000000 to convert from microseconds to
- // seconds.
- var average_frames_per_second =
- Math.round(1000000.0 / average_frame_time_in_us);
-
- // Update the display with our calculated frame rate.
- var fps_counter = document.getElementById('fps');
- fps_counter.innerHTML = 'FPS: ' + average_frames_per_second;
- }
- }
- window.setTimeout(UpdateFPS, 1000);
- }
- window.setTimeout(UpdateFPS, 1000);
- </script>
-</head>
-
-<body>
- <video autoplay loop id="v" src="progressive.mp4"></video>
- <div class="instructions">
- Use either the keyboard keys, direction keys, or analog joystick/thumbstick
- to look around.
- </div>
- <!-- The spinner is required in order to get around an implementation detail
- of the 'Renderer.Rasterize.DurationInterval.Avg' cval that we rely on
- for measuring the framerate. In a nutshell, that cval is only updated
- when a CSS animation is active, but not when a video is playing, even
- though both of these things result in a continual re-rasterization of
- the UI/video. -->
- <div class="spinner"></div>
- <div id="fps"></div>
-</body>
-
-</html>
diff --git a/cobalt/demos/content/mtm-demo/normal.html b/cobalt/demos/content/mtm-demo/normal.html
deleted file mode 100644
index 3d516f6..0000000
--- a/cobalt/demos/content/mtm-demo/normal.html
+++ /dev/null
@@ -1,23 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
- <title>Normal Demo</title>
- <style>
- body {
- background-color: rgb(255, 255, 255);
- color: #0047ab;
- font-size: 100px;
- }
- .vid {
- margin: 100px;
- border: 10px solid blue;
- width: 960px;
- height: 540px;
- }
- </style>
-</head>
-<body>
- <div>Normal Demo</div>
- <video autoplay loop id="v" class="vid" src="progressive.mp4"></video>
-</body>
-</html>
diff --git a/cobalt/demos/content/text-encoding-workaround/text-encoding-workaround.html b/cobalt/demos/content/text-encoding-workaround/text-encoding-workaround.html
new file mode 100644
index 0000000..799fcc5
--- /dev/null
+++ b/cobalt/demos/content/text-encoding-workaround/text-encoding-workaround.html
@@ -0,0 +1,65 @@
+<!DOCTYPE html>
+<head>
+ <title>Text Encoding Workaround</title>
+ <style>
+ div {
+ background-color: rgb(200, 200, 200);
+ height: 50px;
+ }
+ </style>
+</head>
+<body>
+ <div id="text-decoded"> </div>
+ <div id="text-encoded"> </div>
+ <div id="large-string-encode"> </div>
+ <div id="large-string-decode"> </div>
+ <script>
+ /*
+ Text encoding is not natively supported by Cobalt.
+ There is work in progress to support it in Cobalt 22.lts.
+
+    In the meantime, a workaround is presented here that can be used to
+    encode/decode to/from UTF-8.
+
+ The full IDL definition can be found at:
+ https://cobalt.googlesource.com/cobalt/+/refs/heads/21.lts.1+/src/cobalt/fetch/fetch_internal.idl,
+    and it is available in 19.lts.1+ versions.
+
+ **WARNING:** As mentioned in the IDL file, the utility was not
+ meant to be public and should not be used outside of the fetch
+ implementation.
+
+    **WARNING:** Even though this is a native implementation, it may still
+    not be ideal for handling very large strings due to its performance.
+
+ **WARNING:** DecodeFromUTF8 throws a simple TypeError exception
+    when the provided data is not valid UTF-8.
+ */
+
+ // Using FetchInternal.decodeFromUTF8
+ var arr = new Uint8Array(
+ [ 228, 189, 160, 229, 165, 189, 239, 188, 140, 228,
+ 184, 150, 231, 149, 140, 33, 32, 208, 159, 209,
+ 128, 208, 184, 208, 178, 208, 181, 209, 130, 32,
+ 208, 188, 208, 184, 209, 128, 33, 32, 72, 101,
+ 108, 108, 111, 32, 119, 111, 114, 108, 100, 33]);
+ var utf8_string = FetchInternal.decodeFromUTF8(arr);
+ document.getElementById("text-decoded").innerHTML = utf8_string;
+
+ // Using FetchInternal.encodeToUTF8
+    var str = "你好，世界! Привет мир! Hello world!";
+ var u8_arr = FetchInternal.encodeToUTF8(str);
+ document.getElementById("text-encoded").innerHTML = u8_arr.toString();
+
+    // Trying very large strings.
+    // Note: This section takes a while to complete.
+ var kSize = 1048576; // 8 x 1024 x 128
+ var char = "c";
+ var large_str = char.repeat(kSize);
+ var u8_large_arr = FetchInternal.encodeToUTF8(large_str);
+ document.getElementById("large-string-encode").innerHTML = u8_large_arr.toString();
+ var large_decoded_str = FetchInternal.decodeFromUTF8(u8_large_arr);
+ document.getElementById("large-string-decode").innerHTML = large_decoded_str;
+ </script>
+</body>
+</html>
diff --git a/cobalt/doc/cvals.md b/cobalt/doc/cvals.md
new file mode 100644
index 0000000..47eb55a
--- /dev/null
+++ b/cobalt/doc/cvals.md
@@ -0,0 +1,433 @@
+# Cobalt CVals
+
+## Overview
+
+CVals are globally accessible, thread-safe, key-value pairs within Cobalt, which
+are primarily used for monitoring state and tracking performance and memory.
+Each CVal is defined with a unique key and registered with the global
+CValManager. The CValManager can subsequently be queried for the current value
+of the key, which is returned as a string. CVals come in two varieties:
+
+* **PublicCVals** - active in all builds.
+* **DebugCVals** - only enabled in non-Gold builds.
+
+## Usage
+
+### Debug Console
+
+The debug console can be toggled between three modes (off, on, and hud) by
+pressing Ctrl-O. It displays the current values of registered CVals, updating
+them at 60 Hz. The initially displayed values are determined by
+`DEFAULT_ACTIVE_SET` in `debug_values/console_values.js`. However, this is
+modifiable in the debug console hud.
+
+The registered CVals can be viewed by entering `debug.cvalList()` into the debug
+console. Additional CVals can be registered via
+`debug.cvalAdd(substringToMatch)` and can be removed via
+`debug.cvalRemove(substringToMatch)`. The current registered list can be saved
+as a new set via `debug.cvalSave(set_key)`, and the saved set can later be
+loaded as the active set via `debug.cvalLoad(set_key)`.
+
+A full list of the commands available via the debug console is shown by entering
+`debug.help()`.
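+
+For example, a typical interactive sequence in the debug console might look
+like the following (the substring and set key are illustrative):
+
+```
+  > debug.cvalList()
+  > debug.cvalAdd("Renderer.Rasterize")
+  > debug.cvalSave("renderer_set")
+  > debug.cvalLoad("renderer_set")
+```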
+
+### JavaScript
+
+CVals are exposed to JavaScript via the H5vccCVal object. This provides an
+interface for requesting all of the keys, which are returned as an array, and
+for retrieving a specific CVal value, which is returned as an optional string,
+via its CVal key. While the H5vccCVal object is usable in any build, only public
+CVals are queryable in Gold builds.
+
+Here are examples of its usage:
+
+```
+ > h5vcc.cVal.keys()
+ < H5vccCValKeyList[151]
+
+ > h5vcc.cVal.keys().length
+ < 151
+
+ > h5vcc.cVal.keys().item(8)
+ < "Count.MainWebModule.Layout.Box"
+
+ > h5vcc.cVal.getValue("Count.MainWebModule.Layout.Box")
+ < "463"
+```
+
+## Tracking Categories
+
+### Cobalt
+
+#### PublicCVals
+
+* **Cobalt.Lifetime** - The total number of milliseconds that Cobalt has been
+ running.
+
+#### DebugCVals
+
+* **Cobalt.Server.DevTools** - The IP address and port of the DevTools server,
+ if it is running.
+* **Cobalt.Server.WebDriver** - The IP address and port of the Webdriver
+ server, if it is running.
+
+### Count
+
+#### PublicCVals
+
+* **Count.DOM.EventListeners** - The total number of EventListeners in
+ existence globally. This includes ones that are pending garbage collection.
+* **Count.DOM.Nodes** - The total number of Nodes in existence globally. This
+ includes ones that are pending garbage collection.
+* **Count.MainWebModule.DOM.HtmlElement** - The total number of HtmlElements
+ in the MainWebModule. This includes elements that are not in the document
+ and ones that are pending garbage collection.
+* **Count.MainWebModule.DOM.HtmlElement.Document** - The total number of
+ HtmlElements that are in the MainWebModule’s document.
+* **Count.MainWebModule.DOM.HtmlScriptElement.Execute** - The total number of
+ HtmlScriptElement executions that have run since Cobalt started.
+* **Count.MainWebModule.Layout.Box** - The total number of Boxes that are in
+ the MainWebModule’s current layout.
+
+#### DebugCVals
+
+* **Count.DOM.ActiveJavaScriptEvents** - The number of JavaScript events that
+ are currently running.
+* **Count.DOM.Attrs** - The total number of Attrs in existence globally. This
+ includes ones that are pending garbage collection.
+* **Count.DOM.StringMaps** - The total number of StringMaps in existence
+ globally. This includes ones that are pending garbage collection.
+* **Count.DOM.TokenLists** - The total number of TokenLists in existence
+ globally. This includes ones that are pending garbage collection.
+* **Count.DOM.HtmlCollections** - The total number of HtmlCollections in
+ existence globally. This includes ones that are pending garbage collection.
+* **Count.DOM.NodeLists** - The total number of NodeLists in existence
+ globally. This includes ones that are pending garbage collection.
+* **Count.DOM.NodeMaps** - The total number of NodeMaps in existence globally.
+ This includes ones that are pending garbage collection.
+* **Count.MainWebModule.[RESOURCE_CACHE_TYPE].PendingCallbacks** - The number
+  of resources that have completed loading but still have pending callbacks.
+* **Count.MainWebModule.[RESOURCE_CACHE_TYPE].Resource.Requested** - The total
+ number of resources that have ever been requested.
+* **Count.MainWebModule.[RESOURCE_CACHE_TYPE].Resource.Loaded** - The total
+ number of resources that have ever been successfully loaded.
+* **Count.MainWebModule.[RESOURCE_CACHE_TYPE].Resource.Loading** - The number
+ of resources that are currently loading.
+* **Count.Renderer.Rasterize.NewRenderTree** - The total number of new render
+ trees that have been rasterized.
+* **Count.VersionCompatibilityViolation** - The total number of Cobalt version
+ compatibility violations encountered.
+* **Count.XHR** - The total number of xhr::XMLHttpRequest in existence
+ globally.
+
+### Event
+
+The Event category currently consists of counts and durations (in microseconds)
+for input events, which are tracked from when the event is first injected into
+the window, until a render tree is generated from it. Each event type is tracked
+separately; current event types are: *KeyDown*, *KeyUp*, *PointerDown*,
+*PointerUp*.
+
+#### PublicCVals
+
+* **Event.MainWebModule.[EVENT_TYPE].ProducedRenderTree** - Whether or not the
+ event produced a render tree.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement** - The total
+ number of HTML elements after the event produces its first render tree.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.Created** - The
+ number of HTML elements created during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.Destroyed** - The
+ number of HTML elements destroyed during the event. NOTE: This number will
+ only be non-zero if GC occurs during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.Document** - The
+ number of HTML elements in the document after the event produces its first
+ render tree.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.Document.Added** -
+ The number of HTML elements added to the document during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.Document.Removed** -
+ The number of HTML elements removed from the document during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.UpdateMatchingRules** -
+ The number of HTML elements that had their matching rules updated during the
+ event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.UpdateComputedStyle** -
+ The number of HTML elements that had their computed style updated during the
+ event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.GenerateHtmlElementComputedStyle** -
+ The number of HTML elements that had their computed style fully generated
+ during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].DOM.HtmlElement.GeneratePseudoElementComputedStyle** -
+ The number of pseudo elements that had their computed style fully generated
+ during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].Layout.Box** - The total number of
+ Layout boxes after the event produces its first render tree.
+* **Event.Count.MainWebModule.[EVENT_TYPE].Layout.Box.Created** - The number
+ of Layout boxes that were created during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].Layout.Box.Destroyed** - The number
+ of Layout boxes that were destroyed during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].Layout.Box.UpdateSize** - The
+ number of times UpdateSize() is called during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].Layout.Box.RenderAndAnimate** - The
+ number of times RenderAndAnimate() is called during the event.
+* **Event.Count.MainWebModule.[EVENT_TYPE].Layout.Box.UpdateCrossReferences** -
+ The number of times UpdateCrossReferences() is called during the event.
+* **Event.Duration.MainWebModule.[EVENT_TYPE]** - The total duration of the
+ event from the keypress being injected until the first render tree is
+ rendered. If the event does not trigger a re-layout, then it only includes
+ the event injection time.
+* **Event.Duration.MainWebModule.[EVENT_TYPE].DOM.InjectEvent** - The time
+ taken to inject the event into the window object. This mainly consists of
+ JavaScript time.
+* **Event.Duration.MainWebModule.[EVENT_TYPE].DOM.RunAnimationFrameCallbacks** -
+ The time taken to run animation frame callbacks during the event. This
+ mainly consists of JavaScript time.
+* **Event.Duration.MainWebModule.[EVENT_TYPE].DOM.UpdateComputedStyle** - The
+ time taken to update the computed styles of all HTML elements (which also
+ includes updating their matching rules). This will track closely with the
+ event DOM counts.
+* **Event.Duration.MainWebModule.[EVENT_TYPE].Layout.BoxTree** - The time
+ taken to fully lay out the box tree.
+* **Event.Duration.MainWebModule.[EVENT_TYPE].Layout.BoxTree.BoxGeneration** -
+ The time taken to generate the boxes within the box tree. This will track
+ closely with the number of boxes created during the event. This is included
+ within the BoxTree time.
+* **Event.Duration.MainWebModule.[EVENT_TYPE].Layout.BoxTree.UpdateUsedSizes** -
+ The time taken to update the sizes of the boxes within the box tree, which
+ is when all of the boxes are laid out. This is included within the BoxTree
+ time.
+* **Event.Duration.MainWebModule.[EVENT_TYPE].Layout.RenderAndAnimate** - The
+ time taken to generate the render tree produced by the event.
+* **Event.Duration.MainWebModule.[EVENT_TYPE].Renderer.Rasterize** - The time
+ taken to rasterize the event’s render tree after it is submitted to the
+ renderer.
+* **Event.Duration.MainWebModule.DOM.VideoStartDelay** - The delay from the
+ start of the event until the start of a video. NOTE1: This is not set until
+ after the event completes, so it is not included in the value dictionary.
+ NOTE2: This is not tracked by event type, so each new event will reset this
+ value.
+* **Event.Time.MainWebModule.[EVENT_TYPE].Start** - Time when the event
+ started.
+
+#### DebugCVals
+
+* **Event.MainWebModule.IsProcessing** - Whether or not an event is currently
+ being processed.
+* **Event.MainWebModule.[EVENT_TYPE].ValueDictionary** - All counts and
+ durations for this event as a JSON string. This is used by the
+ tv_testcase_event_recorder to minimize the number of CVal requests it makes
+ to retrieve all of the data for an event.
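+
+For example, the dictionary can be read and parsed from JavaScript as follows
+(KeyDown is one of the event types listed above; the available fields depend on
+the counts and durations recorded for that event):
+
+```
+  > var json = h5vcc.cVal.getValue("Event.MainWebModule.KeyDown.ValueDictionary");
+  > var values = JSON.parse(json);
+```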
+
+### MainWebModule
+
+#### DebugCVals
+
+* **MainWebModule.IsRenderTreeRasterizationPending** - Whether or not a render
+ tree has been produced but not yet rasterized.
+* **MainWebModule.Layout.IsRenderTreePending** - Whether or not the layout is
+ scheduled to produce a new render tree.
+
+### Media
+
+#### PublicCVals
+
+Various SbPlayer metrics are available. They must first be enabled with
+`h5vcc.settings.set("Media.EnableMetrics", 1)`, as shown in the example after
+this list:
+
+* **Media.SbPlayerCreateTime.Minimum**
+* **Media.SbPlayerCreateTime.Median**
+* **Media.SbPlayerCreateTime.Maximum**
+* **Media.SbPlayerDestructionTime.Minimum**
+* **Media.SbPlayerDestructionTime.Median**
+* **Media.SbPlayerDestructionTime.Maximum**
+* **Media.SbPlayerWriteSamplesTime.Minimum**
+* **Media.SbPlayerWriteSamplesTime.Median**
+* **Media.SbPlayerWriteSamplesTime.Maximum**
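+
+A minimal sketch of enabling the metrics and reading one of them from the debug
+console (the returned value is illustrative):
+
+```
+  > h5vcc.settings.set("Media.EnableMetrics", 1)
+  > h5vcc.cVal.getValue("Media.SbPlayerCreateTime.Median")
+  < "8421"
+```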
+
+#### DebugCVals
+
+The media pipeline exposes many state values. However, since multiple pipelines
+can run at once, the current pipeline number must first be queried in order to
+access them. This can be done with `h5vcc.cVal.keys().filter(key =>
+key.startsWith("Media.Pipeline.") && key.endsWith("MaxVideoCapabilities") &&
+h5vcc.cVal.getValue(key).length === 0)`. (A pipeline with a non-empty
+MaxVideoCapabilities is a 10x player, i.e. a secondary small player, so a
+pipeline whose MaxVideoCapabilities has length 0 must be the primary pipeline.)
+The query returns an answer like `Media.Pipeline.3.MaxVideoCapabilities`, so
+the main pipeline in this example is `3`. With that pipeline number you can
+then access the following CVals, substituting your own pipeline number for the
+`3` (see the sketch after this list):
+
+* **Media.Pipeline.3.Started**
+* **Media.Pipeline.3.Suspended**
+* **Media.Pipeline.3.Stopped**
+* **Media.Pipeline.3.Ended**
+* **Media.Pipeline.3.PlayerState**
+* **Media.Pipeline.3.Volume**
+* **Media.Pipeline.3.PlaybackRate**
+* **Media.Pipeline.3.Duration**
+* **Media.Pipeline.3.LastMediaTime**
+* **Media.Pipeline.3.MaxVideoCapabilities**
+* **Media.Pipeline.3.SeekTime**
+* **Media.Pipeline.3.FirstWrittenAudioTimestamp**
+* **Media.Pipeline.3.FirstWrittenVideoTimestamp**
+* **Media.Pipeline.3.LastWrittenAudioTimestamp**
+* **Media.Pipeline.3.LastWrittenVideoTimestamp**
+* **Media.Pipeline.3.VideoWidth**
+* **Media.Pipeline.3.VideoHeight**
+* **Media.Pipeline.3.IsAudioEosWritten**
+* **Media.Pipeline.3.IsVideoEosWritten**
+* **Media.Pipeline.3.PipelineStatus**
+* **Media.Pipeline.3.CurrentCodec**
+* **Media.Pipeline.3.ErrorMessage**
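+
+A minimal sketch that finds the primary pipeline and reads one of its values,
+assuming the `filter` expression above returns an ordinary JavaScript array
+(the resulting prefix is illustrative):
+
+```
+  > var key = h5vcc.cVal.keys().filter(k =>
+        k.startsWith("Media.Pipeline.") &&
+        k.endsWith("MaxVideoCapabilities") &&
+        h5vcc.cVal.getValue(k).length === 0)[0];
+  > var prefix = key.replace("MaxVideoCapabilities", "");  // e.g. "Media.Pipeline.3."
+  > h5vcc.cVal.getValue(prefix + "PlayerState")
+```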
+
+### Memory
+
+#### PublicCVals
+
+* **Memory.CPU.Free** - The total CPU memory (in bytes) potentially available
+ to this application minus the amount being used by the application.
+* **Memory.CPU.Used** - The total physical CPU memory (in bytes) used by this
+ application.
+* **Memory.GPU.Free** - The total GPU memory (in bytes) potentially available
+ to this application minus the amount being used by the application. NOTE: On
+ many platforms, GPU memory information is unavailable.
+* **Memory.GPU.Used** - The total physical GPU memory (in bytes) used by this
+ application. NOTE: On many platforms, GPU memory information is unavailable.
+* **Memory.JS** - The total memory being used by JavaScript.
+* **Memory.Font.LocalTypefaceCache.Capacity** - The capacity in bytes of the
+ font cache for use with local typefaces. This is a hard cap that can never
+ be exceeded.
+* **Memory.Font.LocalTypefaceCache.Size** - The current size in bytes of the
+ font cache for use with local typefaces.
+* **Memory.MainWebModule.DOM.HtmlScriptElement.Execute** - The total size in
+ bytes of all scripts executed by HtmlScriptElements since Cobalt started.
+* **Memory.MainWebModule.[RESOURCE_CACHE_TYPE].Capacity** - The capacity in
+ bytes of the resource cache specified by RESOURCE_CACHE_TYPE. When the
+  resource cache exceeds its capacity, unused resources are purged from it.
+* **Memory.MainWebModule.[RESOURCE_CACHE_TYPE].Resource.Loaded** - The total
+ size in bytes of all resources ever loaded by the resource cache specified
+ by RESOURCE_CACHE_TYPE.
+* **Memory.MainWebModule.[RESOURCE_CACHE_TYPE].Size** - The total number of
+ bytes currently used by the resource cache specified by RESOURCE_CACHE_TYPE.
+
+#### DebugCVals
+
+* **Memory.XHR** - The total memory allocated to xhr::XMLHttpRequest objects.
+* **Memory.CachedSoftwareRasterizer.CacheUsage** - Total memory occupied by
+ cached software-rasterized surfaces.
+* **Memory.CachedSoftwareRasterizer.FrameCacheUsage** - Total memory occupied
+  by cached software-rasterized surfaces that were referenced this frame.
+
+### Renderer
+
+#### PublicCVals
+
+* **Renderer.HasActiveAnimations** - Whether or not the current render tree
+ has animations.
+* **Renderer.Rasterize.DurationInterval.\*** - Tracks the duration of
+ intervals between rasterization calls during all animations and updates its
+ stats with a new set of entries every 60 calls. Given that it only updates
+ every 60 samples, it typically includes multiple animations. This provides
+ an accurate view of the framerate over those samples and is the value used
+ with the FPS overlay.
+ * **Renderer.Rasterize.DurationInterval.Cnt** - The number of intervals
+ included in the stats. Should always be 60.
+ * **Renderer.Rasterize.DurationInterval.Avg** - The average duration of
+ the intervals between the animation rasterizations included in the set.
+ * **Renderer.Rasterize.DurationInterval.Min** - The minimum duration of
+ the intervals between the animation rasterizations included in the set.
+ * **Renderer.Rasterize.DurationInterval.Max** - The maximum duration of
+ the intervals between the animation rasterizations included in the set.
+ * **Renderer.Rasterize.DurationInterval.Pct.25th** - The 25th percentile
+ duration of the intervals between the animation rasterizations included
+ in the set.
+ * **Renderer.Rasterize.DurationInterval.Pct.50th** - The 50th percentile
+ duration of the intervals between the animation rasterizations included
+ in the set.
+ * **Renderer.Rasterize.DurationInterval.Pct.75th** - The 75th percentile
+ duration of the intervals between the animation rasterizations included
+ in the set.
+ * **Renderer.Rasterize.DurationInterval.Pct.95th** - The 95th percentile
+ duration of the intervals between the animation rasterizations included
+ in the set.
+ * **Renderer.Rasterize.DurationInterval.Std** - The standard deviation of
+ the intervals between the animation rasterizations included in the set.
+* **Renderer.Rasterize.AnimationsInterval.\*** - Tracks the duration of
+ intervals between rasterization calls during a single animation and updates
+ its stats when the animation ends. The stats include all of the animation’s
+ rasterization intervals. This provides an accurate view of the framerate
+ during the animation.
+ * **Renderer.Rasterize.AnimationsInterval.Cnt** - The number of intervals
+ included in the stats. Accounts for all rasterizations that occurred
+ during the animation.
+ * **Renderer.Rasterize.AnimationsInterval.Avg** - The average duration of
+ the intervals between the animation rasterizations included in the set.
+ * **Renderer.Rasterize.AnimationsInterval.Min** - The minimum duration of
+ the intervals between the animation rasterizations included in the set.
+ * **Renderer.Rasterize.AnimationsInterval.Max** - The maximum duration of
+ the intervals between the animation rasterizations included in the set.
+ * **Renderer.Rasterize.AnimationsInterval.Pct.25th** - The 25th percentile
+ duration of the intervals between the animation rasterizations included
+ in the set.
+ * **Renderer.Rasterize.AnimationsInterval.Pct.50th** - The 50th percentile
+ duration of the intervals between the animation rasterizations included
+ in the set.
+ * **Renderer.Rasterize.AnimationsInterval.Pct.75th** - The 75th percentile
+ duration of the intervals between the animation rasterizations included
+ in the set.
+ * **Renderer.Rasterize.AnimationsInterval.Pct.95th** - The 95th percentile
+ duration of the intervals between the animation rasterizations included
+ in the set.
+ * **Renderer.Rasterize.AnimationsInterval.Std** - The standard deviation
+ of the intervals between the animation rasterizations included in the
+ set.
+
+#### DebugCVals
+
+* **Renderer.SubmissionQueueSize** - The current size of the renderer
+ submission queue. Each item in the queue contains a render tree and
+ associated animations.
+* **Renderer.ToSubmissionTime** - The current difference in milliseconds
+ between the layout's clock and the renderer's clock.
+* **Renderer.Rasterize.Animations.\*** - Tracks the duration of each
+ rasterization call during a single animation and updates its stats when the
+ animation ends. The stats are drawn from all of the animation’s
+ rasterizations. Given that this only tracks the time spent in the
+ rasterizer, it does not provide as accurate a picture of the framerate as
+ DurationInterval and AnimationsInterval.
+ * **Renderer.Rasterize.Animations.Cnt** - The number of rasterization
+ durations included in the stats. Accounts for all rasterizations that
+ occurred during the animation.
+ * **Renderer.Rasterize.Animations.Avg** - The average duration of the
+ rasterizations included in the set.
+ * **Renderer.Rasterize.Animations.Min** - The minimum duration of the
+ rasterizations included in the set.
+ * **Renderer.Rasterize.Animations.Max** - The maximum duration of the
+ rasterizations included in the set.
+ * **Renderer.Rasterize.Animations.Pct.25th** - The 25th percentile
+ duration of the rasterizations included in the set.
+ * **Renderer.Rasterize.Animations.Pct.50th** - The 50th percentile
+ duration of the rasterizations included in the set.
+ * **Renderer.Rasterize.Animations.Pct.75th** - The 75th percentile
+ duration of the rasterizations included in the set.
+ * **Renderer.Rasterize.Animations.Pct.95th** - The 95th percentile
+ duration of the rasterizations included in the set.
+ * **Renderer.Rasterize.Animations.Std** - The standard deviation of the
+ rasterization durations included in the set.
+
+### Time
+
+#### PublicCVals
+
+* **Time.Cobalt.Start** - Time when Cobalt was launched.
+* **Time.Browser.Navigate** - Time when the BrowserModule’s last Navigate
+ occurred.
+* **Time.Browser.OnLoadEvent** - Time when the BrowserModule’s last
+ OnLoadEvent occurred.
+* **Time.MainWebModule.DOM.HtmlScriptElement.Execute** - Time when an
+ HtmlScriptElement was last executed.
+* **Time.Renderer.Rasterize.Animations.Start** - Time when the Renderer last
+ started playing animations.
+* **Time.Renderer.Rasterize.Animations.End** - Time when the Renderer last
+ stopped playing animations.
+* **Time.Renderer.Rasterize.NewRenderTree** - Time when the most recent render
+ tree was first rasterized.
diff --git a/cobalt/doc/device_authentication.md b/cobalt/doc/device_authentication.md
index 297f82b..6ef84a5 100644
--- a/cobalt/doc/device_authentication.md
+++ b/cobalt/doc/device_authentication.md
@@ -35,7 +35,7 @@
since it enables implementations where the key exists only in secure hardware
and never enters the system's main memory. A reference implementation, which
depends on BoringSSL exists at
-[starboard/linux/x64x11/internal/system_sign_with_certification_secret_key.cc](../../starboard/linux/x64x11/internal/system_sign_with_certification_secret_key.cc).
+[internal/starboard/linux/x64x11/internal/system_sign_with_certification_secret_key.cc](../../internal/starboard/linux/x64x11/internal/system_sign_with_certification_secret_key.cc).
### Cobalt signing
diff --git a/cobalt/doc/docker_build.md b/cobalt/doc/docker_build.md
index 4ba59a9..a65e17d 100644
--- a/cobalt/doc/docker_build.md
+++ b/cobalt/doc/docker_build.md
@@ -54,7 +54,19 @@
## Pre-built images
-Note: Pre-built images from a public container registry are not yet available.
+Pre-built images are available at https://github.com/orgs/youtube/packages?repo_name=cobalt
+
+For example, a container for building the Android platform from the main branch can be pulled as follows:
+
+```
+docker pull ghcr.io/youtube/cobalt/cobalt-build-android:main
+```
+
+Similarly, from the LTS branch for the Evergreen platform:
+
+```
+docker pull ghcr.io/youtube/cobalt/cobalt-build-evergreen:23.lts
+```
## Troubleshooting
@@ -63,4 +75,4 @@
`docker-compose run linux-x64x11 /bin/bash`
-and try to build cobalt [with the usual `gyp / ninja` flow](../../README.md#building-and-running-the-code).
+and try to build cobalt [with the usual `gn / ninja` flow](../../README.md#building-and-running-the-code).
diff --git a/cobalt/doc/voice_search.md b/cobalt/doc/voice_search.md
index d4bd6ec..6a72bee 100644
--- a/cobalt/doc/voice_search.md
+++ b/cobalt/doc/voice_search.md
@@ -1,15 +1,10 @@
# Enabling voice search in Cobalt
-Cobalt enables voice search through either:
+Cobalt enables voice search through a subset of the
+[MediaRecorder Web API](https://www.w3.org/TR/mediastream-recording/#mediarecorder-api).
-1. A subset of the [MediaRecorder Web API](https://www.w3.org/TR/mediastream-recording/#mediarecorder-api)
-2. A subset of the [Speech Recognition Web API](https://w3c.github.io/speech-api/#speechreco-section)
-
-Only one or the other can be used, and we recommend that the MediaRecorder API
-is followed, as the Speech Recognition API is deprecated as of Starboard 13.
-
-In both approaches, in order to check whether to enable voice control or not,
-web apps will call the [MediaDevices.enumerateDevices()](https://www.w3.org/TR/mediacapture-streams/#dom-mediadevices-enumeratedevices%28%29)
+In order to check whether to enable voice control or not, web apps will call the
+[MediaDevices.enumerateDevices()](https://www.w3.org/TR/mediacapture-streams/#dom-mediadevices-enumeratedevices%28%29)
Web API function within which Cobalt will in turn call a subset of the
[Starboard SbMicrophone API](../../starboard/microphone.h).
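+
+A minimal sketch of the check a web app might perform, using only the standard
+`MediaDevices.enumerateDevices()` Web API (the surrounding UI logic is
+illustrative):
+
+```
+navigator.mediaDevices.enumerateDevices().then(function(devices) {
+  var hasMicrophone = devices.some(function(device) {
+    return device.kind === 'audioinput';
+  });
+  if (hasMicrophone) {
+    // Show the voice search affordance.
+  }
+});
+```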
@@ -19,48 +14,7 @@
## MediaRecorder API
To enable the MediaRecorder API in Cobalt, the complete
-[SbMicrophone API](../../starboard/microphone.h) must be implemented, and
-`SbSpeechRecognizerIsSupported()` must return `false`.
-
-## Speech Recognition API - Deprecated
-
-**The Speech Recognition API is deprecated as of Starboard 13.**
-
-In order to provide support for using this API, platforms must implement the
-[Starboard SbSpeechRecognizer API](../../starboard/speech_recognizer.h) as well
-as a subset of the [SbMicrophone API](../../starboard/microphone.h).
-
-### Specific instructions to enable voice search
-
-1. Implement `SbSpeechRecognizerIsSupported()` to return `true`, and implement
- the [SbSpeechRecognizer API](../../starboard/speech_recognizer.h).
-2. Implement the following subset of the
- [SbMicrophone API](../../starboard/microphone.h):
- - `SbMicrophoneGetAvailable()`
- - `SbMicrophoneCreate()`
- - `SbMicrophoneDestroy()`
-
- In particular, SbMicrophoneCreate() must return a valid microphone. It is
- okay to stub out the other functions, e.g. have `SbMicrophoneOpen()`
- return `false`.
-3. The YouTube app will display the mic icon on the search page when it detects
- valid microphone input devices using `MediaDevices.enumerateDevices()`.
-4. With `SbSpeechRecognizerIsSupported()` implemented to return `true`, Cobalt
- will use the platform's
- [Starboard SbSpeechRecognizer API](../../starboard/speech_recognizer.h)
- implementation, and it will not actually read directly from the microphone
- via the [Starboard SbMicrophone API](../../starboard/microphone.h).
-
-### Differences from versions of Cobalt <= 11
-
-In previous versions of Cobalt, there was no way to dynamically disable
-speech support besides modifying common Cobalt code to dynamically stub out the
-Speech Recognition API when the platform does not support microphone input.
-This is no longer necessary, web apps should now rely on
-`MediaDevices.enumerateDevices()` to determine whether voice support is enabled
-or not.
-
-### Speech Recognition API is deprecated in Starboard 13 ###
+[SbMicrophone API](../../starboard/microphone.h) must be implemented.
Web applications are expected to use the MediaRecorder API. This in turn relies
on the SbMicrophone API as detailed above.
diff --git a/cobalt/dom/document.cc b/cobalt/dom/document.cc
index c35472c..b3d1ca0 100644
--- a/cobalt/dom/document.cc
+++ b/cobalt/dom/document.cc
@@ -23,6 +23,7 @@
#include "base/compiler_specific.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/token.h"
#include "cobalt/base/tokens.h"
@@ -603,7 +604,7 @@
DCHECK(base::MessageLoop::current());
should_dispatch_load_event_ = false;
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&Document::DispatchOnLoadEvent,
base::AsWeakPtr<Document>(this)));
diff --git a/cobalt/dom/event_queue.cc b/cobalt/dom/event_queue.cc
index 1bd5282..f12e2aa 100644
--- a/cobalt/dom/event_queue.cc
+++ b/cobalt/dom/event_queue.cc
@@ -16,13 +16,14 @@
#include "base/bind.h"
#include "base/logging.h"
+#include "base/threading/thread_task_runner_handle.h"
namespace cobalt {
namespace dom {
EventQueue::EventQueue(web::EventTarget* event_target)
: event_target_(event_target),
- message_loop_(base::MessageLoop::current()->task_runner()) {
+ message_loop_(base::ThreadTaskRunnerHandle::Get()) {
DCHECK(event_target_);
DCHECK(message_loop_);
}
diff --git a/cobalt/dom/html_element.cc b/cobalt/dom/html_element.cc
index 36ad437..01ea1eb 100644
--- a/cobalt/dom/html_element.cc
+++ b/cobalt/dom/html_element.cc
@@ -22,6 +22,7 @@
#include "base/lazy_instance.h"
#include "base/message_loop/message_loop_task_runner.h"
#include "base/strings/string_number_conversions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/console_log.h"
#include "cobalt/base/tokens.h"
#include "cobalt/cssom/absolute_url_value.h"
@@ -2220,13 +2221,13 @@
ui_nav_item_ = new ui_navigation::NavItem(
*ui_nav_item_type,
base::Bind(
- &UiNavCallbackHelper, base::MessageLoop::current()->task_runner(),
+ &UiNavCallbackHelper, base::ThreadTaskRunnerHandle::Get(),
base::Bind(&HTMLElement::OnUiNavBlur, base::AsWeakPtr(this))),
base::Bind(
- &UiNavCallbackHelper, base::MessageLoop::current()->task_runner(),
+ &UiNavCallbackHelper, base::ThreadTaskRunnerHandle::Get(),
base::Bind(&HTMLElement::OnUiNavFocus, base::AsWeakPtr(this))),
base::Bind(
- &UiNavCallbackHelper, base::MessageLoop::current()->task_runner(),
+ &UiNavCallbackHelper, base::ThreadTaskRunnerHandle::Get(),
base::Bind(&HTMLElement::OnUiNavScroll, base::AsWeakPtr(this))));
ui_nav_item_->SetDir(ui_nav_item_dir);
if (ui_nav_focus_duration_) {
diff --git a/cobalt/dom/html_link_element.cc b/cobalt/dom/html_link_element.cc
index d01d0e0..efd1019 100644
--- a/cobalt/dom/html_link_element.cc
+++ b/cobalt/dom/html_link_element.cc
@@ -21,6 +21,7 @@
#include "base/bind.h"
#include "base/strings/string_tokenizer.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/cssom/css_parser.h"
#include "cobalt/cssom/css_style_sheet.h"
@@ -268,7 +269,7 @@
// GetLoadTimingInfo and create resource timing before loader released.
GetLoadTimingInfoAndCreateResourceTiming();
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&HTMLLinkElement::ReleaseLoader, this));
if (!error) return;
diff --git a/cobalt/dom/html_media_element.cc b/cobalt/dom/html_media_element.cc
index aaa4675..c3376ea 100644
--- a/cobalt/dom/html_media_element.cc
+++ b/cobalt/dom/html_media_element.cc
@@ -19,6 +19,7 @@
#include <limits>
#include <memory>
#include <utility>
+#include <vector>
#include "base/bind.h"
#include "base/compiler_specific.h"
@@ -26,6 +27,8 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/instance_counter.h"
#include "cobalt/base/tokens.h"
@@ -640,6 +643,24 @@
event_queue_.Enqueue(event);
}
+std::string HTMLMediaElement::h5vcc_audio_connectors(
+ script::ExceptionState* exception_state) const {
+#if SB_API_VERSION >= 15
+ if (!player_) {
+ web::DOMException::Raise(web::DOMException::kInvalidStateErr,
+ exception_state);
+ return std::string();
+ }
+
+ std::vector<std::string> configs = player_->GetAudioConnectors();
+ return base::JoinString(configs, ";");
+#else // SB_API_VERSION >= 15
+ web::DOMException::Raise(web::DOMException::kNotSupportedErr,
+ exception_state);
+ return std::string();
+#endif // SB_API_VERSION >= 15
+}
+
void HTMLMediaElement::CreateMediaPlayer() {
TRACE_EVENT0("cobalt::dom", "HTMLMediaElement::CreateMediaPlayer()");
LOG(INFO) << "Create media player.";
@@ -890,7 +911,7 @@
request_mode_ = GetRequestMode(GetAttribute("crossOrigin"));
DCHECK(node_document()->location());
std::unique_ptr<DataSource> data_source(new media::URLFetcherDataSource(
- base::MessageLoop::current()->task_runner(), url, csp_callback,
+ base::ThreadTaskRunnerHandle::Get(), url, csp_callback,
html_element_context()->fetcher_factory()->network_module(),
request_mode_, node_document()->location()->GetOriginAsObject()));
player_->LoadProgressive(url, std::move(data_source));
diff --git a/cobalt/dom/html_media_element.h b/cobalt/dom/html_media_element.h
index 4276fd8..612e21c 100644
--- a/cobalt/dom/html_media_element.h
+++ b/cobalt/dom/html_media_element.h
@@ -146,6 +146,12 @@
// function won't modify the target of the |event| passed in.
void ScheduleEvent(const scoped_refptr<web::Event>& event);
+ // Returns semicolon-separated names of audio connectors, like
+ // "hdmi;bluetooth".
+ // TODO(b/267678497): The current interface is tentative, to be refined.
+ std::string h5vcc_audio_connectors(
+ script::ExceptionState* exception_state) const;
+
// Set max video capabilities.
void SetMaxVideoCapabilities(const std::string& max_video_capabilities,
script::ExceptionState* exception_state);
diff --git a/cobalt/dom/html_media_element.idl b/cobalt/dom/html_media_element.idl
index 4a1a509..401060a 100644
--- a/cobalt/dom/html_media_element.idl
+++ b/cobalt/dom/html_media_element.idl
@@ -61,4 +61,10 @@
attribute boolean controls;
[RaisesException] attribute double volume;
attribute boolean muted;
+
+ // Non-standard; semicolon-separated names of audio connectors, like
+ // "hdmi;bluetooth". It raises `NotSupportedError` on platforms that don't
+ // support this feature, or `InvalidStateError` if there is no active playback.
+ // TODO(b/267678497): The current interface is tentative, to be refined.
+ [RaisesException] readonly attribute DOMString h5vccAudioConnectors;
};
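
A hedged JavaScript sketch of how an app could consume the attribute defined above; the `<video>` element lookup is illustrative, and the error names follow the comment in the IDL:

```javascript
const video = document.querySelector('video');
try {
  // Semicolon-separated list, e.g. "hdmi;bluetooth", split into an array.
  const connectors = video.h5vccAudioConnectors.split(';').filter(Boolean);
  console.log('Active audio connectors:', connectors);
} catch (e) {
  // NotSupportedError when the feature is unavailable (pre-Starboard 15),
  // InvalidStateError when there is no active playback.
  console.log('h5vccAudioConnectors not available:', e.name);
}
```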
diff --git a/cobalt/dom/html_script_element.cc b/cobalt/dom/html_script_element.cc
index 1a1f7ba..fa4153e 100644
--- a/cobalt/dom/html_script_element.cc
+++ b/cobalt/dom/html_script_element.cc
@@ -21,6 +21,7 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/strings/string_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/console_log.h"
#include "cobalt/base/tokens.h"
@@ -607,7 +608,7 @@
}
// Post a task to release the loader.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&HTMLScriptElement::ReleaseLoader, this));
}
diff --git a/cobalt/dom/keyboard_event.cc b/cobalt/dom/keyboard_event.cc
index ca4fd9c..892dad9 100644
--- a/cobalt/dom/keyboard_event.cc
+++ b/cobalt/dom/keyboard_event.cc
@@ -324,11 +324,11 @@
case keycode::kBrowserHome:
return "BrowserHome";
case keycode::kVolumeMute:
- return "VolumeMute";
+ return "AudioVolumeMute";
case keycode::kVolumeDown:
- return "VolumeMute";
+ return "AudioVolumeDown";
case keycode::kVolumeUp:
- return "VolumeMute";
+ return "AudioVolumeUp";
case keycode::kMediaNextTrack:
return "MediaNextTrack";
case keycode::kMediaPrevTrack:
diff --git a/cobalt/dom/lottie_player.cc b/cobalt/dom/lottie_player.cc
index 4adb4a8..10e2397 100644
--- a/cobalt/dom/lottie_player.cc
+++ b/cobalt/dom/lottie_player.cc
@@ -19,6 +19,7 @@
#include <utility>
#include "base/strings/string_number_conversions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/dom/document.h"
@@ -41,7 +42,7 @@
: HTMLElement(document, base::Token(kTagName)),
autoplaying_(true),
ALLOW_THIS_IN_INITIALIZER_LIST(event_queue_(this)),
- callback_task_runner_(base::MessageLoop::current()->task_runner()) {
+ callback_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
SetAnimationEventCallbacks();
}
diff --git a/cobalt/dom/media_source.cc b/cobalt/dom/media_source.cc
index 8871b83..31ffc95 100644
--- a/cobalt/dom/media_source.cc
+++ b/cobalt/dom/media_source.cc
@@ -53,6 +53,7 @@
#include "base/guid.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/base/tokens.h"
@@ -423,7 +424,7 @@
offload_algorithm_runner_.reset(
new OffloadAlgorithmRunner<SourceBufferAlgorithm>(
algorithm_process_thread_->message_loop()->task_runner(),
- base::MessageLoop::current()->task_runner()));
+ base::ThreadTaskRunnerHandle::Get()));
} else {
LOG(INFO) << "Algorithm offloading disabled.";
}
diff --git a/cobalt/dom/media_source.h b/cobalt/dom/media_source.h
index 0daa77a..9af3f55 100644
--- a/cobalt/dom/media_source.h
+++ b/cobalt/dom/media_source.h
@@ -51,6 +51,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/token.h"
#include "cobalt/dom/audio_track.h"
#include "cobalt/dom/event_queue.h"
diff --git a/cobalt/dom/mutation_observer_task_manager.cc b/cobalt/dom/mutation_observer_task_manager.cc
index 5453873..9c5196a 100644
--- a/cobalt/dom/mutation_observer_task_manager.cc
+++ b/cobalt/dom/mutation_observer_task_manager.cc
@@ -16,6 +16,7 @@
#include "base/callback.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/dom/mutation_observer.h"
@@ -55,7 +56,7 @@
// 2. Set mutation observer compound microtask queued flag.
task_posted_ = true;
// 3. Queue a compound microtask to notify mutation observers.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&MutationObserverTaskManager::NotifyMutationObservers,
base::Unretained(this)));
diff --git a/cobalt/dom/screenshot_manager.cc b/cobalt/dom/screenshot_manager.cc
index 0483259..b8f3eeb 100644
--- a/cobalt/dom/screenshot_manager.cc
+++ b/cobalt/dom/screenshot_manager.cc
@@ -17,6 +17,7 @@
#include <memory>
#include <utility>
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "cobalt/dom/screenshot.h"
#include "cobalt/render_tree/node.h"
@@ -45,10 +46,9 @@
// We want to ScreenshotManager::FillScreenshot, on this thread.
base::Callback<void(std::unique_ptr<uint8[]>, const math::Size&)>
- fill_screenshot = base::Bind(&ScreenshotManager::FillScreenshot,
- base::Unretained(this), next_ticket_id_,
- base::MessageLoop::current()->task_runner(),
- desired_format);
+ fill_screenshot = base::Bind(
+ &ScreenshotManager::FillScreenshot, base::Unretained(this),
+ next_ticket_id_, base::ThreadTaskRunnerHandle::Get(), desired_format);
bool was_emplaced =
ticket_to_screenshot_promise_map_
.emplace(next_ticket_id_, std::move(promise_reference))
diff --git a/cobalt/dom/serialized_algorithm_runner.h b/cobalt/dom/serialized_algorithm_runner.h
index db625ad..0d8122a 100644
--- a/cobalt/dom/serialized_algorithm_runner.h
+++ b/cobalt/dom/serialized_algorithm_runner.h
@@ -15,6 +15,7 @@
#ifndef COBALT_DOM_SERIALIZED_ALGORITHM_RUNNER_H_
#define COBALT_DOM_SERIALIZED_ALGORITHM_RUNNER_H_
+#include <algorithm>
#include <memory>
#include <utility>
@@ -24,8 +25,10 @@
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "starboard/common/mutex.h"
+#include "starboard/time.h"
namespace cobalt {
namespace dom {
@@ -84,11 +87,31 @@
const starboard::Mutex& mutex)
: synchronization_required_(synchronization_required), mutex_(mutex) {
if (synchronization_required_) {
- mutex_.Acquire();
+ // Crash if we are trying to re-acquire the lock on the same thread.
+ CHECK_NE(acquired_thread_id_, SbThreadGetId());
+
+ SbTime start = SbTimeGetMonotonicNow();
+ SbTime wait_interval = kSbTimeMillisecond;
+ constexpr SbTime kMaxWaitInterval = kSbTimeMillisecond * 16;
+
+ for (;;) {
+ if (mutex_.AcquireTry()) {
+ break;
+ }
+ SbThreadSleep(wait_interval);
+ // Double the wait interval upon every failure, but cap it at
+ // kMaxWaitInterval.
+ wait_interval = std::min(wait_interval * 2, kMaxWaitInterval);
+ // Crash if we've been waiting for too long.
+ CHECK_LT(SbTimeGetMonotonicNow() - start, kSbTimeSecond);
+ }
+ acquired_thread_id_ = SbThreadGetId();
}
}
~ScopedLockWhenRequired() {
if (synchronization_required_) {
+ CHECK_EQ(acquired_thread_id_, SbThreadGetId());
+ acquired_thread_id_ = kSbThreadInvalidId;
mutex_.Release();
}
}
@@ -96,6 +119,7 @@
private:
const bool synchronization_required_;
const starboard::Mutex& mutex_;
+ SbThreadId acquired_thread_id_ = kSbThreadInvalidId;
};
Handle(bool synchronization_required,
@@ -270,7 +294,7 @@
return;
}
- auto task_runner = base::MessageLoop::current()->task_runner();
+ auto task_runner = base::ThreadTaskRunnerHandle::Get();
task_runner->PostTask(FROM_HERE,
base::BindOnce(&DefaultAlgorithmRunner::Process,
base::Unretained(this), handle));
@@ -282,7 +306,7 @@
DCHECK(handle);
TRACE_EVENT0("cobalt::dom", "DefaultAlgorithmRunner::Process()");
- auto task_runner = base::MessageLoop::current()->task_runner();
+ auto task_runner = base::ThreadTaskRunnerHandle::Get();
bool finished = false;
handle->Process(&finished);
diff --git a/cobalt/dom/source_buffer.cc b/cobalt/dom/source_buffer.cc
index 267677b..039303f 100644
--- a/cobalt/dom/source_buffer.cc
+++ b/cobalt/dom/source_buffer.cc
@@ -453,6 +453,17 @@
track_defaults_ = track_defaults;
}
+double SourceBuffer::write_head(script::ExceptionState* exception_state) const {
+ if (media_source_ == NULL) {
+ web::DOMException::Raise(web::DOMException::kInvalidStateErr,
+ exception_state);
+ return 0.0;
+ }
+
+ DCHECK(chunk_demuxer_);
+ return chunk_demuxer_->GetWriteHead(id_).InSecondsF();
+}
+
void SourceBuffer::OnRemovedFromMediaSource() {
if (media_source_ == NULL) {
return;
diff --git a/cobalt/dom/source_buffer.h b/cobalt/dom/source_buffer.h
index c611177..5869760 100644
--- a/cobalt/dom/source_buffer.h
+++ b/cobalt/dom/source_buffer.h
@@ -55,6 +55,7 @@
#include "base/message_loop/message_loop.h"
#include "base/optional.h"
#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "cobalt/base/token.h"
#include "cobalt/dom/audio_track_list.h"
@@ -134,6 +135,9 @@
// Custom, not in any spec.
//
+ // Returns the highest presentation timestamp written to SbPlayer.
+ double write_head(script::ExceptionState* exception_state) const;
+
void OnRemovedFromMediaSource();
double GetHighestPresentationTimestamp() const;
@@ -160,7 +164,7 @@
private:
scoped_refptr<base::SingleThreadTaskRunner> task_runner_ =
- base::MessageLoop::current()->task_runner();
+ base::ThreadTaskRunnerHandle::Get();
// The access to |source_buffer_| always happens on |task_runner_|, and
// needn't be explicitly synchronized by a mutex.
SourceBuffer* source_buffer_;
diff --git a/cobalt/dom/source_buffer.idl b/cobalt/dom/source_buffer.idl
index 39a9248..0d81b03 100644
--- a/cobalt/dom/source_buffer.idl
+++ b/cobalt/dom/source_buffer.idl
@@ -33,4 +33,10 @@
[RaisesException] void abort();
[RaisesException] void remove(double start, unrestricted double end);
[RaisesException] attribute TrackDefaultList trackDefaults;
+
+ // Non-standard interface (b/267678497).
+ // Returns the highest presentation timestamp written to SbPlayer; raises
+ // `InvalidStateError` if the SourceBuffer object has been removed from the
+ // MediaSource object.
+ [RaisesException] readonly attribute double writeHead;
};
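
A short JavaScript sketch, based only on the IDL above, showing how `writeHead` could gate further appends; the 10-second threshold is an arbitrary example value:

```javascript
function appendIfNeeded(sourceBuffer, mediaElement, nextSegment) {
  // writeHead is the highest presentation timestamp written to SbPlayer;
  // it raises InvalidStateError once the SourceBuffer is removed from its
  // MediaSource, so only call this while the buffer is still attached.
  if (sourceBuffer.writeHead - mediaElement.currentTime < 10 &&
      !sourceBuffer.updating) {
    sourceBuffer.appendBuffer(nextSegment);
  }
}
```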
diff --git a/cobalt/dom/user_agent_data_test.cc b/cobalt/dom/user_agent_data_test.cc
index 376fcda..cd6886f 100644
--- a/cobalt/dom/user_agent_data_test.cc
+++ b/cobalt/dom/user_agent_data_test.cc
@@ -87,7 +87,7 @@
platform_info_->set_starboard_version("");
platform_info_->set_os_name_and_version("");
platform_info_->set_original_design_manufacturer("");
- platform_info_->set_device_type(kSbSystemDeviceTypeUnknown);
+ platform_info_->set_device_type("UNKNOWN");
platform_info_->set_chipset_model_number("");
platform_info_->set_model_year("");
platform_info_->set_firmware_version("");
diff --git a/cobalt/dom/window.cc b/cobalt/dom/window.cc
index 1644cdc..8ff1bd9 100644
--- a/cobalt/dom/window.cc
+++ b/cobalt/dom/window.cc
@@ -19,6 +19,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/base/tokens.h"
@@ -196,7 +197,7 @@
// Document load start is deferred from this constructor so that we can be
// guaranteed that this Window object is fully constructed before document
// loading begins.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&Window::StartDocumentLoad, base::Unretained(this),
fetcher_factory, settings->creation_url(),
dom_parser, load_complete_callback));
diff --git a/cobalt/h5vcc/dial/dial_server.cc b/cobalt/h5vcc/dial/dial_server.cc
index f68f570..9bb528d 100644
--- a/cobalt/h5vcc/dial/dial_server.cc
+++ b/cobalt/h5vcc/dial/dial_server.cc
@@ -14,6 +14,8 @@
#include "cobalt/h5vcc/dial/dial_server.h"
+#include "base/threading/thread_task_runner_handle.h"
+
#if defined(DIAL_SERVER)
#include <memory>
@@ -162,7 +164,7 @@
const base::WeakPtr<DialServer>& dial_server,
const std::string& service_name)
: dial_server_(dial_server), service_name_(service_name) {
- task_runner_ = base::MessageLoop::current()->task_runner();
+ task_runner_ = base::ThreadTaskRunnerHandle::Get();
}
DialServer::ServiceHandler::~ServiceHandler() {}
@@ -172,7 +174,7 @@
const CompletionCB& completion_cb) {
// This gets called on the DialService/Network thread.
// Post it to the WebModule thread.
- DCHECK_NE(base::MessageLoop::current()->task_runner(), task_runner_);
+ DCHECK_NE(base::ThreadTaskRunnerHandle::Get(), task_runner_);
task_runner_->PostTask(
FROM_HERE, base::Bind(&ServiceHandler::OnHandleRequest, this, path,
request, completion_cb));
@@ -181,7 +183,7 @@
void DialServer::ServiceHandler::OnHandleRequest(
const std::string& path, const net::HttpServerRequestInfo& request,
const CompletionCB& completion_cb) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), task_runner_);
if (!dial_server_) {
completion_cb.Run(std::unique_ptr<net::HttpServerResponseInfo>());
return;
diff --git a/cobalt/h5vcc/h5vcc.cc b/cobalt/h5vcc/h5vcc.cc
index 48c2ed5..fdc0893 100644
--- a/cobalt/h5vcc/h5vcc.cc
+++ b/cobalt/h5vcc/h5vcc.cc
@@ -37,7 +37,8 @@
#if SB_IS(EVERGREEN)
settings.updater_module,
#endif
- settings.user_agent_data, settings.global_environment);
+ settings.user_agent_data, settings.global_environment,
+ settings.persistent_settings);
storage_ =
new H5vccStorage(settings.network_module, settings.persistent_settings);
trace_event_ = new H5vccTraceEvent();
diff --git a/cobalt/h5vcc/h5vcc_accessibility.cc b/cobalt/h5vcc/h5vcc_accessibility.cc
index 2463943..f248a0c 100644
--- a/cobalt/h5vcc/h5vcc_accessibility.cc
+++ b/cobalt/h5vcc/h5vcc_accessibility.cc
@@ -16,6 +16,7 @@
#include "base/command_line.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/accessibility_settings_changed_event.h"
#include "cobalt/base/accessibility_text_to_speech_settings_changed_event.h"
#include "cobalt/browser/switches.h"
@@ -42,7 +43,7 @@
H5vccAccessibility::H5vccAccessibility(base::EventDispatcher* event_dispatcher)
: event_dispatcher_(event_dispatcher) {
- task_runner_ = base::MessageLoop::current()->task_runner();
+ task_runner_ = base::ThreadTaskRunnerHandle::Get();
on_application_event_callback_ = base::Bind(
&H5vccAccessibility::OnApplicationEvent, base::Unretained(this));
event_dispatcher_->AddEventCallback(
@@ -99,29 +100,28 @@
void H5vccAccessibility::AddTextToSpeechListener(
const H5vccAccessibilityCallbackHolder& holder) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), task_runner_);
text_to_speech_listener_.reset(
new H5vccAccessibilityCallbackReference(this, holder));
}
void H5vccAccessibility::AddHighContrastTextListener(
const H5vccAccessibilityCallbackHolder& holder) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), task_runner_);
high_contrast_text_listener_.reset(
new H5vccAccessibilityCallbackReference(this, holder));
}
void H5vccAccessibility::OnApplicationEvent(const base::Event* event) {
// This method should be called from the application event thread.
- DCHECK_NE(base::MessageLoop::current()->task_runner(), task_runner_);
+ DCHECK_NE(base::ThreadTaskRunnerHandle::Get(), task_runner_);
task_runner_->PostTask(
FROM_HERE, base::Bind(&H5vccAccessibility::InternalOnApplicationEvent,
- base::Unretained(this),
- event->GetTypeId()));
+ base::Unretained(this), event->GetTypeId()));
}
void H5vccAccessibility::InternalOnApplicationEvent(base::TypeId type) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), task_runner_);
if (type == base::AccessibilitySettingsChangedEvent::TypeId() &&
high_contrast_text_listener_) {
high_contrast_text_listener_->value().Run();
diff --git a/cobalt/h5vcc/h5vcc_event_listener_container.h b/cobalt/h5vcc/h5vcc_event_listener_container.h
index 7da01c0..f2a0876 100644
--- a/cobalt/h5vcc/h5vcc_event_listener_container.h
+++ b/cobalt/h5vcc/h5vcc_event_listener_container.h
@@ -22,6 +22,7 @@
#include "base/location.h"
#include "base/message_loop/message_loop.h"
#include "base/synchronization/lock.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/script/callback_function.h"
#include "cobalt/script/script_value.h"
#include "cobalt/script/wrappable.h"
@@ -48,12 +49,12 @@
struct Listener {
Listener(script::Wrappable* owner, const CallbackHolderType& cb)
: callback(owner, cb),
- task_runner(base::MessageLoop::current()->task_runner()) {}
+ task_runner(base::ThreadTaskRunnerHandle::Get()) {}
// Notifies listener. Must be called on the same message loop the
// listener registered its callback from.
void Notify(GetArgumentCallback on_notify) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), task_runner);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), task_runner);
CallbackArgType arg = on_notify.Run();
callback.value().Run(arg);
}
@@ -128,10 +129,10 @@
// Explicit template specialization for the no callback argument case, where
// we don't need to call the |GetArgumentCallback| callback.
template <>
-inline void
- H5vccEventListenerContainer<void, script::CallbackFunction<void()> >::
- Listener::Notify(GetArgumentCallback) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), task_runner);
+inline void H5vccEventListenerContainer<
+ void,
+ script::CallbackFunction<void()> >::Listener::Notify(GetArgumentCallback) {
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), task_runner);
callback.value().Run();
}
diff --git a/cobalt/h5vcc/h5vcc_platform_service.cc b/cobalt/h5vcc/h5vcc_platform_service.cc
index 27f7510..aaa07e6 100644
--- a/cobalt/h5vcc/h5vcc_platform_service.cc
+++ b/cobalt/h5vcc/h5vcc_platform_service.cc
@@ -17,6 +17,7 @@
#include <utility>
#include <vector>
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/web/context.h"
#include "cobalt/web/environment_settings.h"
@@ -66,7 +67,7 @@
: environment_(environment),
platform_service_api_(platform_service_api),
receive_callback_(this, receive_callback),
- main_message_loop_(base::MessageLoop::current()->task_runner()),
+ main_message_loop_(base::ThreadTaskRunnerHandle::Get()),
ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)),
ALLOW_THIS_IN_INITIALIZER_LIST(
weak_this_(weak_ptr_factory_.GetWeakPtr())) {
diff --git a/cobalt/h5vcc/h5vcc_settings.cc b/cobalt/h5vcc/h5vcc_settings.cc
index ca8a59c..574852d 100644
--- a/cobalt/h5vcc/h5vcc_settings.cc
+++ b/cobalt/h5vcc/h5vcc_settings.cc
@@ -16,6 +16,10 @@
#include <string.h>
+#include <memory>
+
+#include "cobalt/network/network_module.h"
+
namespace cobalt {
namespace h5vcc {
@@ -25,8 +29,8 @@
// option 1 disables all video codecs except av1
// option 2 disables all video codecs except vp9
constexpr std::array<const char*, 3> kDisableCodecCombinations{
- "av01;hev1;hvc1;vp09;vp8.vp9", "avc1;avc3;hev1;hvc1;vp09;vp8;vp9",
- "av01;avc1;avc3;hev1;hvc1;vp8"};
+ {"av01;hev1;hvc1;vp09;vp8.vp9", "avc1;avc3;hev1;hvc1;vp09;vp8;vp9",
+ "av01;avc1;avc3;hev1;hvc1;vp8"}};
}; // namespace
H5vccSettings::H5vccSettings(
@@ -38,7 +42,8 @@
cobalt::updater::UpdaterModule* updater_module,
#endif
web::NavigatorUAData* user_agent_data,
- script::GlobalEnvironment* global_environment)
+ script::GlobalEnvironment* global_environment,
+ persistent_storage::PersistentSettings* persistent_settings)
: set_web_setting_func_(set_web_setting_func),
media_module_(media_module),
can_play_type_handler_(can_play_type_handler),
@@ -47,13 +52,15 @@
updater_module_(updater_module),
#endif
user_agent_data_(user_agent_data),
- global_environment_(global_environment) {
+ global_environment_(global_environment),
+ persistent_settings_(persistent_settings) {
}
bool H5vccSettings::Set(const std::string& name, int32 value) const {
const char kMediaPrefix[] = "Media.";
const char kDisableMediaCodec[] = "DisableMediaCodec";
const char kNavigatorUAData[] = "NavigatorUAData";
+ const char kClientHintHeaders[] = "ClientHintHeaders";
const char kQUIC[] = "QUIC";
#if SB_IS(EVERGREEN)
@@ -82,6 +89,17 @@
return true;
}
+ if (name.compare(kClientHintHeaders) == 0) {
+ if (!persistent_settings_) {
+ return false;
+ } else {
+ persistent_settings_->SetPersistentSetting(
+ network::kClientHintHeadersEnabledPersistentSettingsKey,
+ std::make_unique<base::Value>(value != 0));
+ return true;
+ }
+ }
+
if (name.compare(kQUIC) == 0) {
if (!network_module_) {
return false;
diff --git a/cobalt/h5vcc/h5vcc_settings.h b/cobalt/h5vcc/h5vcc_settings.h
index e6e5b4a..34e7a44 100644
--- a/cobalt/h5vcc/h5vcc_settings.h
+++ b/cobalt/h5vcc/h5vcc_settings.h
@@ -19,6 +19,7 @@
#include "cobalt/media/media_module.h"
#include "cobalt/network/network_module.h"
+#include "cobalt/persistent_storage/persistent_settings.h"
#include "cobalt/script/global_environment.h"
#include "cobalt/script/wrappable.h"
#include "cobalt/web/navigator_ua_data.h"
@@ -46,7 +47,8 @@
cobalt::updater::UpdaterModule* updater_module,
#endif
web::NavigatorUAData* user_agent_data,
- script::GlobalEnvironment* global_environment);
+ script::GlobalEnvironment* global_environment,
+ persistent_storage::PersistentSettings* persistent_settings);
// Returns true when the setting is set successfully or if the setting has
// already been set to the expected value. Returns false when the setting is
@@ -65,6 +67,7 @@
#endif
web::NavigatorUAData* user_agent_data_;
script::GlobalEnvironment* global_environment_;
+ persistent_storage::PersistentSettings* persistent_settings_;
DISALLOW_COPY_AND_ASSIGN(H5vccSettings);
};
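
Assuming the `h5vcc.settings.set()` binding that fronts `H5vccSettings::Set()` (not shown in this hunk), a JavaScript sketch of toggling the new persistent setting; per the code above, a value of 0 disables it and any non-zero value enables it:

```javascript
// Persists the toggle via PersistentSettings; returns false when persistent
// settings are unavailable.
const ok = window.h5vcc.settings.set('ClientHintHeaders', 1);
if (!ok) {
  console.log('Failed to persist the ClientHintHeaders setting.');
}
```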
diff --git a/cobalt/h5vcc/h5vcc_storage.cc b/cobalt/h5vcc/h5vcc_storage.cc
index 6c83852..0bb660c 100644
--- a/cobalt/h5vcc/h5vcc_storage.cc
+++ b/cobalt/h5vcc/h5vcc_storage.cc
@@ -119,7 +119,7 @@
net::CookieStore* cookie_store =
network_module_->url_request_context()->cookie_store();
auto* cookie_monster = static_cast<net::CookieMonster*>(cookie_store);
- network_module_->task_runner()->PostBlockingTask(
+ network_module_->task_runner()->PostTask(
FROM_HERE,
base::Bind(&net::CookieMonster::DeleteAllMatchingInfoAsync,
base::Unretained(cookie_monster), net::CookieDeletionInfo(),
diff --git a/cobalt/h5vcc/h5vcc_system.cc b/cobalt/h5vcc/h5vcc_system.cc
index b28e941..89c9d9a 100644
--- a/cobalt/h5vcc/h5vcc_system.cc
+++ b/cobalt/h5vcc/h5vcc_system.cc
@@ -18,8 +18,11 @@
#include "cobalt/configuration/configuration.h"
#include "cobalt/version.h"
#include "cobalt_build_id.h" // NOLINT(build/include_subdir)
+#include "starboard/common/system_property.h"
#include "starboard/system.h"
+using starboard::kSystemPropertyMaxLength;
+
namespace cobalt {
namespace h5vcc {
@@ -55,7 +58,6 @@
std::string H5vccSystem::advertising_id() const {
std::string result;
#if SB_API_VERSION >= 14
- const size_t kSystemPropertyMaxLength = 1024;
char property[kSystemPropertyMaxLength] = {0};
if (!SbSystemGetProperty(kSbSystemPropertyAdvertisingId, property,
SB_ARRAY_SIZE_INT(property))) {
@@ -69,7 +71,6 @@
bool H5vccSystem::limit_ad_tracking() const {
bool result = false;
#if SB_API_VERSION >= 14
- const size_t kSystemPropertyMaxLength = 1024;
char property[kSystemPropertyMaxLength] = {0};
if (!SbSystemGetProperty(kSbSystemPropertyLimitAdTracking, property,
SB_ARRAY_SIZE_INT(property))) {
diff --git a/cobalt/input/BUILD.gn b/cobalt/input/BUILD.gn
index d97732f..912eb83 100644
--- a/cobalt/input/BUILD.gn
+++ b/cobalt/input/BUILD.gn
@@ -35,8 +35,8 @@
if (enable_vr) {
sources += [
- "private/camera_3d_vr.cc",
- "private/camera_3d_vr.h",
+ "//internal/cobalt/input/private/camera_3d_vr.cc",
+ "//internal/cobalt/input/private/camera_3d_vr.h",
]
} else {
sources += [
diff --git a/cobalt/input/input_device_manager_fuzzer.cc b/cobalt/input/input_device_manager_fuzzer.cc
index 02dcf1f..866b764 100644
--- a/cobalt/input/input_device_manager_fuzzer.cc
+++ b/cobalt/input/input_device_manager_fuzzer.cc
@@ -18,6 +18,7 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/rand_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/tokens.h"
#include "cobalt/dom/keyboard_event.h"
#include "cobalt/dom/keycode.h"
@@ -121,7 +122,7 @@
keyboard_event_callback_.Run(base::Tokens::keydown(), event_init);
keyboard_event_callback_.Run(base::Tokens::keyup(), event_init);
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::Bind(&InputDeviceManagerFuzzer::OnNextEvent,
base::Unretained(this)),
diff --git a/cobalt/input/keypress_generator_filter.cc b/cobalt/input/keypress_generator_filter.cc
index 75b35a3..3844a27 100644
--- a/cobalt/input/keypress_generator_filter.cc
+++ b/cobalt/input/keypress_generator_filter.cc
@@ -55,14 +55,10 @@
int char_code = dom::KeyboardEvent::ComputeCharCode(orig_event.key_code(),
orig_event.shift_key());
- if (char_code > 0) {
- dom::KeyboardEventInit event(orig_event);
- event.set_char_code(char_code);
- DispatchKeyboardEvent(base::Tokens::keypress(), event);
- return true;
- }
-
- return false;
+ dom::KeyboardEventInit event(orig_event);
+ event.set_char_code(char_code);
+ DispatchKeyboardEvent(base::Tokens::keypress(), event);
+ return true;
}
} // namespace input
diff --git a/cobalt/layout/layout_manager.cc b/cobalt/layout/layout_manager.cc
index 6592f0e..500d060 100644
--- a/cobalt/layout/layout_manager.cc
+++ b/cobalt/layout/layout_manager.cc
@@ -21,6 +21,7 @@
#include "base/bind.h"
#include "base/memory/ptr_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/cssom/cascade_precedence.h"
@@ -231,7 +232,7 @@
DirtyLayout();
// Run the |DoLayoutAndProduceRenderTree| task after onload event finished.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&LayoutManager::Impl::DoLayoutAndProduceRenderTree,
base::Unretained(this)));
diff --git a/cobalt/layout_tests/layout_snapshot.cc b/cobalt/layout_tests/layout_snapshot.cc
index e137530..a600395 100644
--- a/cobalt/layout_tests/layout_snapshot.cc
+++ b/cobalt/layout_tests/layout_snapshot.cc
@@ -20,6 +20,8 @@
#include "base/path_service.h"
#include "base/run_loop.h"
#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "cobalt/browser/client_hint_headers.h"
#include "cobalt/browser/user_agent_string.h"
#include "cobalt/browser/web_module.h"
#include "cobalt/cssom/viewport_size.h"
@@ -37,8 +39,8 @@
namespace {
void Quit(base::RunLoop* run_loop) {
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, run_loop->QuitClosure());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ run_loop->QuitClosure());
}
// Called when layout completes and results have been produced. We use this
@@ -75,10 +77,10 @@
// don't interfere.
net_options.https_requirement = network::kHTTPSOptional;
web::WebSettingsImpl web_settings;
+ browser::UserAgentPlatformInfo platform_info;
network::NetworkModule network_module(
- browser::CreateUserAgentString(
- browser::GetUserAgentPlatformInfoFromSystem()),
- NULL, NULL, net_options);
+ browser::CreateUserAgentString(platform_info),
+ browser::GetClientHintHeaders(platform_info), NULL, NULL, net_options);
// Use 128M of image cache to minimize the effect of image loading.
const size_t kImageCacheCapacity = 128 * 1024 * 1024;
diff --git a/cobalt/layout_tests/layout_tests.cc b/cobalt/layout_tests/layout_tests.cc
index 0e2f11f..5c9d0f1 100644
--- a/cobalt/layout_tests/layout_tests.cc
+++ b/cobalt/layout_tests/layout_tests.cc
@@ -20,6 +20,7 @@
#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
#include "base/test/scoped_task_environment.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/cobalt_paths.h"
#include "cobalt/cssom/viewport_size.h"
#include "cobalt/layout_tests/layout_snapshot.h"
@@ -137,8 +138,7 @@
browser::WebModule::LayoutResults layout_results = SnapshotURL(
test_info.url, viewport_size, pixel_tester.GetResourceProvider(),
- base::Bind(&ScreenshotFunction,
- base::MessageLoop::current()->task_runner(),
+ base::Bind(&ScreenshotFunction, base::ThreadTaskRunnerHandle::Get(),
base::Unretained(&pixel_tester)));
scoped_refptr<render_tree::animations::AnimateNode> animate_node =
diff --git a/cobalt/layout_tests/testdata/web-platform-tests/service-workers/web_platform_tests.txt b/cobalt/layout_tests/testdata/web-platform-tests/service-workers/web_platform_tests.txt
index 8072c2b..1132c15 100644
--- a/cobalt/layout_tests/testdata/web-platform-tests/service-workers/web_platform_tests.txt
+++ b/cobalt/layout_tests/testdata/web-platform-tests/service-workers/web_platform_tests.txt
@@ -2,8 +2,6 @@
service-worker/clients-matchall-on-evaluation.https.html, PASS
service-worker/fetch-event-add-async.https.html, PASS
-service-worker/import-scripts-cross-origin.https.html, PASS
-service-worker/import-scripts-mime-types.https.html, PASS
service-worker/import-scripts-resource-map.https.html, PASS
service-worker/import-scripts-updated-flag.https.html, PASS
service-worker/register-default-scope.https.html, PASS
@@ -11,30 +9,36 @@
service-worker/registration-security-error.https.html, PASS
service-worker/registration-script-url.https.html, PASS
service-worker/rejections.https.html, PASS
-service-worker/serviceworkerobject-scripturl.https.html, PASS
service-worker/service-worker-csp-default.https.html, PASS
service-worker/service-worker-csp-connect.https.html, PASS
-service-worker/service-worker-header.https.html, PASS
service-worker/service-worker-csp-script.https.html, PASS
service-worker/Service-Worker-Allowed-header.https.html, PASS
service-worker/skip-waiting-without-client.https.html, PASS
service-worker/uncontrolled-page.https.html, PASS
service-worker/unregister.https.html, PASS
-service-worker/update-missing-import-scripts.https.html, PASS
-service-worker/update-result.https.html, PASS
+service-worker/update-no-cache-request-headers.https.html, PASS
-# Tests pass with memory leakage issue.
-service-worker/update-no-cache-request-headers.https.html, DISABLE
+# b/274011216 flaky test
+service-worker/update-result.https.html, DISABLE
+
+# TODO(b/275914032): Flaky test.
+service-worker/update-missing-import-scripts.https.html, DISABLE
+
+# b/275643772 MIME type check is flaky
+service-worker/import-scripts-mime-types.https.html, DISABLE
# b/234788479 Implement waiting for update worker state tasks in Install algorithm.
service-worker/activation-after-registration.https.html, DISABLE
service-worker/activate-event-after-install-state-change.https.html, DISABLE
+service-worker/import-scripts-cross-origin.https.html, DISABLE
service-worker/import-scripts-redirect.https.html, DISABLE
service-worker/multiple-update.https.html, DISABLE
service-worker/register-wait-forever-in-install-worker.https.html, DISABLE
service-worker/registration-service-worker-attributes.https.html, DISABLE
+service-worker/service-worker-header.https.html, DISABLE
service-worker/state.https.html, DISABLE
service-worker/synced-state.https.html, DISABLE
+service-worker/serviceworkerobject-scripturl.https.html, DISABLE
# "Module" type of dedicated worker is supported in Cobalt
service-worker/dedicated-worker-service-worker-interception.https.html, DISABLE
diff --git a/cobalt/layout_tests/testdata/web-platform-tests/workers/web_platform_tests.txt b/cobalt/layout_tests/testdata/web-platform-tests/workers/web_platform_tests.txt
index 48654c3..9b36ff3 100644
--- a/cobalt/layout_tests/testdata/web-platform-tests/workers/web_platform_tests.txt
+++ b/cobalt/layout_tests/testdata/web-platform-tests/workers/web_platform_tests.txt
@@ -5,11 +5,9 @@
# features that are expected to currently work.
Worker_basic.htm, PASS
+Worker_cross_origin_security_err.htm, PASS
Worker_dispatchEvent_ErrorEvent.htm, PASS
-# b/225037465
-Worker_cross_origin_security_err.htm, DISABLE
-
# b/275741116
Worker_ErrorEvent_bubbles_cancelable.htm, DISABLE
Worker_ErrorEvent_filename.htm, DISABLE
diff --git a/cobalt/layout_tests/web_platform_tests.cc b/cobalt/layout_tests/web_platform_tests.cc
index ac6e3fd..05c2376 100644
--- a/cobalt/layout_tests/web_platform_tests.cc
+++ b/cobalt/layout_tests/web_platform_tests.cc
@@ -24,6 +24,7 @@
#include "base/run_loop.h"
#include "base/strings/string_util.h"
#include "base/test/scoped_task_environment.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/values.h"
#include "cobalt/browser/service_worker_registry.h"
#include "cobalt/browser/user_agent_platform_info.h"
@@ -150,8 +151,8 @@
};
void Quit(base::RunLoop* run_loop) {
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, run_loop->QuitClosure());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ run_loop->QuitClosure());
}
// Called upon window.close(), which indicates that the test has finished.
@@ -213,33 +214,36 @@
web::CspDelegateFactory::GetInstance()->OverrideCreator(
web::kCspEnforcementEnable, CspDelegatePermissive::Create);
+
+ std::unique_ptr<browser::UserAgentPlatformInfo> platform_info(
+ new browser::UserAgentPlatformInfo());
+ std::unique_ptr<browser::ServiceWorkerRegistry> service_worker_registry(
+ new browser::ServiceWorkerRegistry(&web_settings, &network_module,
+ platform_info.get(), url));
+
+ browser::WebModule::Options web_module_options;
// Use test runner mode to allow the content itself to dictate when it is
// ready for layout should be performed. See cobalt/dom/test_runner.h.
- browser::WebModule::Options web_module_options;
web_module_options.layout_trigger = layout::LayoutManager::kTestRunnerMode;
+
// We assume that we won't suspend/resume while running the tests, and so
// we take advantage of the convenience of inline script tags.
web_module_options.enable_inline_script_warnings = false;
web_module_options.web_options.web_settings = &web_settings;
web_module_options.web_options.network_module = &network_module;
+ web_module_options.web_options.service_worker_jobs =
+ service_worker_registry->service_worker_jobs();
+ web_module_options.web_options.platform_info = platform_info.get();
// Prepare a slot for our results to be placed when ready.
base::Optional<browser::WebModule::LayoutResults> results;
base::RunLoop run_loop;
- // Create the WebModule and wait for a layout to occur.
- browser::WebModule web_module("RunWebPlatformTest");
-
- // Create Service Worker Registry
- browser::ServiceWorkerRegistry* service_worker_registry =
- new browser::ServiceWorkerRegistry(&web_settings, &network_module,
- new browser::UserAgentPlatformInfo(),
- url);
- web_module_options.web_options.service_worker_jobs =
- service_worker_registry->service_worker_jobs();
-
- web_module.Run(
+ // Run the WebModule and wait for a layout to occur.
+ std::unique_ptr<browser::WebModule> web_module(
+ new browser::WebModule("RunWebPlatformTest"));
+ web_module->Run(
url, base::kApplicationStateStarted, nullptr /* scroll_engine */,
base::Bind(&WebModuleOnRenderTreeProducedCallback, &results),
base::Bind(&WebModuleErrorCallback, &run_loop,
@@ -249,12 +253,16 @@
can_play_type_handler.get(), media_module.get(), kDefaultViewportSize,
&resource_provider, 60.0f, web_module_options);
run_loop.Run();
+
const std::string extract_results =
"document.getElementById(\"__testharness__results__\").textContent;";
std::string output;
- web_module.ExecuteJavascript(extract_results,
- base::SourceLocation(__FILE__, __LINE__, 1),
- &output, got_results);
+ web_module->ExecuteJavascript(extract_results,
+ base::SourceLocation(__FILE__, __LINE__, 1),
+ &output, got_results);
+ // Ensure that the WebModule stops before stopping the ServiceWorkerRegistry.
+ web_module.reset();
+ service_worker_registry.reset();
return output;
}
diff --git a/cobalt/loader/about_fetcher.cc b/cobalt/loader/about_fetcher.cc
index 33d32d8..0f43428 100644
--- a/cobalt/loader/about_fetcher.cc
+++ b/cobalt/loader/about_fetcher.cc
@@ -16,6 +16,7 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
namespace cobalt {
namespace loader {
@@ -23,7 +24,7 @@
AboutFetcher::AboutFetcher(Handler* handler)
: Fetcher(handler),
ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)) {
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&AboutFetcher::Fetch, weak_ptr_factory_.GetWeakPtr()));
}
diff --git a/cobalt/loader/cors_preflight.cc b/cobalt/loader/cors_preflight.cc
index 53e501d..93f96d2 100644
--- a/cobalt/loader/cors_preflight.cc
+++ b/cobalt/loader/cors_preflight.cc
@@ -280,6 +280,7 @@
url_fetcher_ = net::URLFetcher::Create(url_, net::URLFetcher::OPTIONS, this);
url_fetcher_->SetRequestContext(
network_module_->url_request_context_getter().get());
+ network_module_->AddClientHintHeaders(*url_fetcher_);
url_fetcher_->AddExtraRequestHeader(kOriginheadername + origin_);
// 3. Let headers be the names of request's header list's headers,
// excluding CORS-safelisted request-headers and duplicates, sorted
@@ -331,7 +332,8 @@
net::HttpResponseHeaders* response_headers = source->GetResponseHeaders();
std::string methods, headernames;
// If status is not ok status, return network error
- if (!CORSCheck(*response_headers, origin_, credentials_mode_is_include_) ||
+ if (!CORSCheck(*response_headers, origin_, credentials_mode_is_include_,
+ cors_policy_) ||
source->GetResponseCode() < 200 || source->GetResponseCode() > 299) {
error_callback_.Run();
return;
@@ -406,7 +408,14 @@
// https://fetch.spec.whatwg.org/#concept-cors-check
bool CORSPreflight::CORSCheck(const net::HttpResponseHeaders& response_headers,
const std::string& serialized_origin,
- bool credentials_mode_is_include) {
+ bool credentials_mode_is_include,
+ network::CORSPolicy cors_policy) {
+#ifndef COBALT_FORCE_CORS
+ if (cors_policy == network::kCORSOptional) {
+ DLOG(WARNING) << "CORS check disabled, allowing request without checking.";
+ return true;
+ }
+#endif
// 1. Let origin be the result of extracting header list values given `Access-
// Control-Allow-Origin` and response's header list.
std::string allowed_origin, empty_container, allow_credentials;
diff --git a/cobalt/loader/cors_preflight.h b/cobalt/loader/cors_preflight.h
index 8afa99d..a7476c8 100644
--- a/cobalt/loader/cors_preflight.h
+++ b/cobalt/loader/cors_preflight.h
@@ -27,6 +27,7 @@
#include "base/message_loop/message_loop.h"
#include "base/threading/thread_checker.h"
#include "cobalt/loader/cors_preflight_cache.h"
+#include "cobalt/network/network_delegate.h"
#include "cobalt/network/network_module.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
@@ -57,6 +58,9 @@
void set_headers(const net::HttpRequestHeaders& headers) {
headers_ = headers;
}
+ void set_cors_policy(const network::CORSPolicy cors_policy) {
+ cors_policy_ = cors_policy;
+ }
// Determine if CORS Preflight is needed by a cross origin request
bool IsPreflightNeeded();
// The send method can be called after initializing this class.
@@ -65,9 +69,10 @@
bool Send();
// CORS Check is done on response to ensure simple CORS request is
// allowed by the server.
- static bool CORSCheck(const net::HttpResponseHeaders& response_headers,
- const std::string& serialized_origin,
- bool credentials_mode_is_include);
+ static bool CORSCheck(
+ const net::HttpResponseHeaders& response_headers,
+ const std::string& serialized_origin, bool credentials_mode_is_include,
+ network::CORSPolicy cors_policy = network::kCORSRequired);
// Checks if a header(a name-value pair) is a CORS-Safelisted request-header.
static bool IsSafeRequestHeader(const std::string& name,
const std::string& value);
@@ -88,6 +93,7 @@
bool credentials_mode_is_include_;
bool force_preflight_;
+ network::CORSPolicy cors_policy_ = network::kCORSRequired;
GURL url_;
net::URLFetcher::RequestType method_;
const network::NetworkModule* network_module_;
diff --git a/cobalt/loader/error_fetcher.cc b/cobalt/loader/error_fetcher.cc
index 41aa2e4..cc18c76 100644
--- a/cobalt/loader/error_fetcher.cc
+++ b/cobalt/loader/error_fetcher.cc
@@ -16,6 +16,7 @@
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
namespace cobalt {
namespace loader {
@@ -24,7 +25,7 @@
: Fetcher(handler),
error_message_(error_message),
ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)) {
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&ErrorFetcher::Fetch, weak_ptr_factory_.GetWeakPtr()));
}
diff --git a/cobalt/loader/fetch_interceptor_coordinator.cc b/cobalt/loader/fetch_interceptor_coordinator.cc
index a7599fc..e4d2af9 100644
--- a/cobalt/loader/fetch_interceptor_coordinator.cc
+++ b/cobalt/loader/fetch_interceptor_coordinator.cc
@@ -39,7 +39,7 @@
void FetchInterceptorCoordinator::TryIntercept(
const GURL& url, bool main_resource,
const net::HttpRequestHeaders& request_headers,
- scoped_refptr<base::SingleThreadTaskRunner> callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner,
base::OnceCallback<void(std::unique_ptr<std::string>)> callback,
base::OnceCallback<void(const net::LoadTimingInfo&)>
report_load_timing_info,
diff --git a/cobalt/loader/fetch_interceptor_coordinator.h b/cobalt/loader/fetch_interceptor_coordinator.h
index b266807..82ec85f 100644
--- a/cobalt/loader/fetch_interceptor_coordinator.h
+++ b/cobalt/loader/fetch_interceptor_coordinator.h
@@ -19,7 +19,7 @@
#include <string>
#include "base/bind.h"
-#include "base/single_thread_task_runner.h"
+#include "base/sequenced_task_runner.h"
#include "net/base/load_timing_info.h"
#include "net/http/http_request_headers.h"
#include "url/gurl.h"
@@ -37,7 +37,7 @@
virtual void StartFetch(
const GURL& url, bool main_resource,
const net::HttpRequestHeaders& request_headers,
- scoped_refptr<base::SingleThreadTaskRunner> callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner,
base::OnceCallback<void(std::unique_ptr<std::string>)> callback,
base::OnceCallback<void(const net::LoadTimingInfo&)>
report_load_timing_info,
@@ -58,7 +58,7 @@
void TryIntercept(
const GURL& url, bool main_resource,
const net::HttpRequestHeaders& request_headers,
- scoped_refptr<base::SingleThreadTaskRunner> callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner,
base::OnceCallback<void(std::unique_ptr<std::string>)> callback,
base::OnceCallback<void(const net::LoadTimingInfo&)>
report_load_timing_info,
diff --git a/cobalt/loader/fetcher_cache.cc b/cobalt/loader/fetcher_cache.cc
index 8d6b707..1e6adb5 100644
--- a/cobalt/loader/fetcher_cache.cc
+++ b/cobalt/loader/fetcher_cache.cc
@@ -33,7 +33,7 @@
const std::string& url,
const scoped_refptr<net::HttpResponseHeaders>& headers,
const Origin& last_url_origin, bool did_fail_from_transient_error,
- std::string* data)>
+ std::string data)>
SuccessCallback;
CachedFetcherHandler(const std::string& url, Fetcher::Handler* handler,
@@ -52,6 +52,7 @@
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(!wrapping_fetcher_);
DCHECK(wrapping_fetcher);
+
wrapping_fetcher_ = wrapping_fetcher;
}
@@ -63,6 +64,7 @@
// TODO: Respect HttpResponseHeaders::GetMaxAgeValue().
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(wrapping_fetcher_);
+
auto response = handler_->OnResponseStarted(wrapping_fetcher_, headers);
if (response == kLoadResponseContinue && headers) {
headers_ = headers;
@@ -77,6 +79,7 @@
void OnReceived(Fetcher*, const char* data, size_t size) override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(wrapping_fetcher_);
+
data_.insert(data_.end(), data, data + size);
handler_->OnReceived(wrapping_fetcher_, data, size);
}
@@ -84,6 +87,7 @@
void OnReceivedPassed(Fetcher*, std::unique_ptr<std::string> data) override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(wrapping_fetcher_);
+
data_.insert(data_.end(), data->begin(), data->end());
handler_->OnReceivedPassed(wrapping_fetcher_, std::move(data));
}
@@ -91,15 +95,17 @@
void OnDone(Fetcher*) override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(wrapping_fetcher_);
+
handler_->OnDone(wrapping_fetcher_);
on_success_callback_.Run(
url_, headers_, wrapping_fetcher_->last_url_origin(),
- wrapping_fetcher_->did_fail_from_transient_error(), &data_);
+ wrapping_fetcher_->did_fail_from_transient_error(), std::move(data_));
}
void OnError(Fetcher*, const std::string& error) override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(wrapping_fetcher_);
+
handler_->OnError(wrapping_fetcher_, error);
}
@@ -184,13 +190,12 @@
public:
CacheEntry(const scoped_refptr<net::HttpResponseHeaders>& headers,
const Origin& last_url_origin, bool did_fail_from_transient_error,
- std::string* data)
+ std::string data)
: headers_(headers),
last_url_origin_(last_url_origin),
- did_fail_from_transient_error_(did_fail_from_transient_error) {
- DCHECK(data);
- data_.swap(*data);
- }
+ did_fail_from_transient_error_(did_fail_from_transient_error),
+ data_(std::move(data)),
+ capacity_(data_.capacity()) {}
const scoped_refptr<net::HttpResponseHeaders>& headers() const {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
@@ -213,16 +218,24 @@
}
size_t capacity() const {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- return data_.capacity();
+ CHECK_EQ(capacity_, data_.capacity());
+ return capacity_;
}
private:
+ CacheEntry(const CacheEntry&) = delete;
+ CacheEntry& operator=(const CacheEntry&) = delete;
+
THREAD_CHECKER(thread_checker_);
scoped_refptr<net::HttpResponseHeaders> headers_;
- Origin last_url_origin_;
- bool did_fail_from_transient_error_;
- std::string data_;
+ const Origin last_url_origin_;
+ const bool did_fail_from_transient_error_;
+ const std::string data_;
+
+ // TODO(b/270993319): For debugging cache integrity issues in production only,
+ // remove after identifying the root cause.
+ const size_t capacity_;
};
FetcherCache::FetcherCache(const char* name, size_t capacity)
@@ -236,12 +249,15 @@
FetcherCache::~FetcherCache() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ CHECK_EQ(thread_id_, SbThreadGetId());
+ CHECK(destroy_soon_called_);
while (!cache_entries_.empty()) {
delete cache_entries_.begin()->second;
- cache_entries_.erase(cache_entries_.begin());
+ cache_entries_.pop_front();
}
+ total_size_ = 0;
memory_size_in_bytes_ = 0;
count_resources_cached_ = 0;
}
@@ -251,12 +267,13 @@
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(!real_fetcher_creator.is_null());
- return base::Bind(&FetcherCache::CreateCachedFetcher, base::Unretained(this),
- url, real_fetcher_creator);
+ return base::Bind(&FetcherCache::CreateCachedFetcher, this, url,
+ real_fetcher_creator);
}
void FetcherCache::NotifyResourceRequested(const std::string& url) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ CHECK_EQ(thread_id_, SbThreadGetId());
auto iter = cache_entries_.find(url);
if (iter != cache_entries_.end()) {
@@ -266,6 +283,14 @@
}
}
+void FetcherCache::DestroySoon() {
+#if !defined(COBALT_BUILD_TYPE_GOLD)
+ CHECK(HasOneRef());
+#endif // !defined(COBALT_BUILD_TYPE_GOLD)
+
+ destroy_soon_called_ = true;
+}
+
std::unique_ptr<Fetcher> FetcherCache::CreateCachedFetcher(
const GURL& url, const Loader::FetcherCreator& real_fetcher_creator,
Fetcher::Handler* handler) {
@@ -273,6 +298,11 @@
DCHECK(!real_fetcher_creator.is_null());
DCHECK(handler);
+#if !defined(COBALT_BUILD_TYPE_GOLD)
+ CHECK(!destroy_soon_called_);
+#endif // !defined(COBALT_BUILD_TYPE_GOLD)
+ CHECK_EQ(thread_id_, SbThreadGetId());
+
auto iterator = cache_entries_.find(url.spec());
if (iterator != cache_entries_.end()) {
auto entry = iterator->second;
@@ -285,8 +315,7 @@
}
std::unique_ptr<CachedFetcherHandler> cached_handler(new CachedFetcherHandler(
- url.spec(), handler,
- base::Bind(&FetcherCache::OnFetchSuccess, base::Unretained(this))));
+ url.spec(), handler, base::Bind(&FetcherCache::OnFetchSuccess, this)));
return std::unique_ptr<Fetcher>(
new OngoingFetcher(std::move(cached_handler), real_fetcher_creator));
}
@@ -295,32 +324,44 @@
const std::string& url,
const scoped_refptr<net::HttpResponseHeaders>& headers,
const Origin& last_url_origin, bool did_fail_from_transient_error,
- std::string* data) {
+ std::string data) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- DCHECK(data);
- if (data->size() <= capacity_) {
- auto entry = new CacheEntry(headers, last_url_origin,
- did_fail_from_transient_error, data);
+#if !defined(COBALT_BUILD_TYPE_GOLD)
+ CHECK(!destroy_soon_called_);
+#endif // !defined(COBALT_BUILD_TYPE_GOLD)
+ CHECK_EQ(thread_id_, SbThreadGetId());
- bool inserted = cache_entries_.insert(std::make_pair(url, entry)).second;
- if (!inserted) {
- // The resource is already cached.
- delete entry;
- return;
- }
-
- total_size_ += entry->capacity();
- while (total_size_ > capacity_) {
- DCHECK(!cache_entries_.empty());
- total_size_ -= cache_entries_.begin()->second->capacity();
- delete cache_entries_.begin()->second;
- cache_entries_.erase(cache_entries_.begin());
- --count_resources_cached_;
- }
- ++count_resources_cached_;
- memory_size_in_bytes_ = total_size_;
+ if (data.capacity() > capacity_) {
+ return;
}
+
+ auto entry = new CacheEntry(headers, last_url_origin,
+ did_fail_from_transient_error, std::move(data));
+
+ bool inserted = cache_entries_.insert(std::make_pair(url, entry)).second;
+ if (!inserted) {
+ // The resource is already cached.
+ delete entry;
+ return;
+ }
+
+ total_size_ += entry->capacity();
+ ++count_resources_cached_;
+
+ while (total_size_ > capacity_) {
+ // TODO(b/270993319): For debugging cache integrity issues in production
+ // only, remove after identifying the root cause.
+ CHECK(!cache_entries_.empty());
+ CHECK_GE(total_size_, cache_entries_.begin()->second->capacity());
+
+ total_size_ -= cache_entries_.begin()->second->capacity();
+ delete cache_entries_.begin()->second;
+ cache_entries_.pop_front();
+ --count_resources_cached_;
+ }
+
+ memory_size_in_bytes_ = total_size_;
}
} // namespace loader
diff --git a/cobalt/loader/fetcher_cache.h b/cobalt/loader/fetcher_cache.h
index 32a4466..8f6e2b0 100644
--- a/cobalt/loader/fetcher_cache.h
+++ b/cobalt/loader/fetcher_cache.h
@@ -15,6 +15,7 @@
#ifndef COBALT_LOADER_FETCHER_CACHE_H_
#define COBALT_LOADER_FETCHER_CACHE_H_
+#include <atomic>
#include <memory>
#include <string>
@@ -25,6 +26,7 @@
#include "cobalt/loader/loader.h"
#include "net/base/linked_hash_map.h"
#include "net/http/http_response_headers.h"
+#include "starboard/thread.h"
#include "url/gurl.h"
#include "url/origin.h"
@@ -32,7 +34,7 @@
namespace loader {
// Manages a cache for data fetched by Fetchers.
-class FetcherCache {
+class FetcherCache : public base::RefCountedThreadSafe<FetcherCache> {
public:
FetcherCache(const char* name, size_t capacity);
~FetcherCache();
@@ -41,6 +43,13 @@
const GURL& url, const Loader::FetcherCreator& real_fetcher_creator);
void NotifyResourceRequested(const std::string& url);
+ // Signals the imminent destruction of this object. If everything is
+ // working as expected, there should be no other references to this object,
+ // and all usages of this object should have completed.
+ // TODO(b/270993319): For debugging cache integrity issues in production only,
+ // remove after identifying the root cause.
+ void DestroySoon();
+
private:
class CacheEntry;
@@ -50,10 +59,15 @@
void OnFetchSuccess(const std::string& url,
const scoped_refptr<net::HttpResponseHeaders>& headers,
const Origin& last_url_origin,
- bool did_fail_from_transient_error, std::string* data);
+ bool did_fail_from_transient_error, std::string data);
THREAD_CHECKER(thread_checker_);
+ // TODO(b/270993319): For debugging cache integrity issues in production only,
+ // remove after identifying the root cause.
+ const SbThreadId thread_id_ = SbThreadGetId();
+ std::atomic_bool destroy_soon_called_{false};
+
const size_t capacity_;
size_t total_size_ = 0;
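The header changes above make FetcherCache ref-counted and add DestroySoon() together with a thread id and an atomic flag that exist purely to diagnose b/270993319. The sketch below illustrates the intended contract under simplified assumptions (std::shared_ptr standing in for scoped_refptr, assert standing in for CHECK, and hypothetical class names): the owner flags the cache right before releasing its last reference, so any straggling call afterwards fails loudly.

// Illustrative sketch of the DestroySoon() debugging contract; the classes,
// names, and std::shared_ptr/assert below are simplified stand-ins for the
// real FetcherCache, LoaderFactory, scoped_refptr, and CHECK.
#include <atomic>
#include <cassert>
#include <memory>

class DebuggableCache {
 public:
  void OnFetchSuccess() {
    // Any use after DestroySoon() indicates the lifetime bug being hunted.
    assert(!destroy_soon_called_.load());
    // ... normal caching work would happen here ...
  }

  // Called by the owner right before it drops its last reference.
  void DestroySoon() { destroy_soon_called_.store(true); }

 private:
  std::atomic_bool destroy_soon_called_{false};
};

class Owner {
 public:
  Owner() : cache_(std::make_shared<DebuggableCache>()) {}
  ~Owner() {
    // Mirrors LoaderFactory::~LoaderFactory(): flag the cache before the
    // owning reference goes away so that stray callers trip the assert.
    if (cache_) cache_->DestroySoon();
  }

 private:
  std::shared_ptr<DebuggableCache> cache_;  // stands in for scoped_refptr
};

int main() {
  Owner owner;  // DestroySoon() fires when |owner| goes out of scope.
  return 0;
}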
diff --git a/cobalt/loader/fetcher_factory_test.cc b/cobalt/loader/fetcher_factory_test.cc
index 1f579db..bae9441 100644
--- a/cobalt/loader/fetcher_factory_test.cc
+++ b/cobalt/loader/fetcher_factory_test.cc
@@ -19,6 +19,7 @@
#include "base/optional.h"
#include "base/run_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/loader/file_fetcher.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -36,14 +37,14 @@
}
void OnDone(Fetcher* fetcher) override {
CheckSameFetcher(fetcher);
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, run_loop_->QuitClosure());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ run_loop_->QuitClosure());
}
void OnError(Fetcher* fetcher, const std::string& error_message) override {
CheckSameFetcher(fetcher);
error_message_ = error_message;
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, run_loop_->QuitClosure());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ run_loop_->QuitClosure());
}
Fetcher* fetcher() const { return fetcher_; }
diff --git a/cobalt/loader/fetcher_test.h b/cobalt/loader/fetcher_test.h
index 8a14c4f..2f878c1 100644
--- a/cobalt/loader/fetcher_test.h
+++ b/cobalt/loader/fetcher_test.h
@@ -15,16 +15,16 @@
#ifndef COBALT_LOADER_FETCHER_TEST_H_
#define COBALT_LOADER_FETCHER_TEST_H_
-#include "cobalt/loader/fetcher.h"
-
#include <string>
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "cobalt/loader/fetcher.h"
#include "testing/gmock/include/gmock/gmock.h"
-using ::testing::Invoke;
using ::testing::_;
+using ::testing::Invoke;
namespace cobalt {
namespace loader {
@@ -41,13 +41,13 @@
}
void OnDone(Fetcher* fetcher) override {
CheckFetcher(fetcher);
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, run_loop_->QuitClosure());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ run_loop_->QuitClosure());
}
void OnError(Fetcher* fetcher, const std::string& error) override {
CheckFetcher(fetcher);
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, run_loop_->QuitClosure());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ run_loop_->QuitClosure());
}
const std::string& data() const { return data_; }
diff --git a/cobalt/loader/file_fetcher.cc b/cobalt/loader/file_fetcher.cc
index 11e62bf..752d051 100644
--- a/cobalt/loader/file_fetcher.cc
+++ b/cobalt/loader/file_fetcher.cc
@@ -18,6 +18,7 @@
#include "base/files/file_util.h"
#include "base/location.h"
#include "base/path_service.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/cobalt_paths.h"
namespace cobalt {
@@ -105,7 +106,7 @@
FileFetcher::~FileFetcher() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- if (task_runner_ != base::MessageLoop::current()->task_runner()) {
+ if (task_runner_ != base::ThreadTaskRunnerHandle::Get()) {
// In case we are currently in the middle of a fetch (in which case it will
// be aborted), invalidate the weak pointers to this FileFetcher object to
// ensure that we do not process any responses from pending file I/O, which
diff --git a/cobalt/loader/file_fetcher.h b/cobalt/loader/file_fetcher.h
index 097faa1..68dee66 100644
--- a/cobalt/loader/file_fetcher.h
+++ b/cobalt/loader/file_fetcher.h
@@ -27,8 +27,8 @@
#include "base/files/platform_file.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
#include "base/threading/thread_checker.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/loader/fetcher.h"
namespace cobalt {
@@ -45,7 +45,7 @@
: buffer_size(kDefaultBufferSize),
start_offset(0),
bytes_to_read(std::numeric_limits<int64>::max()),
- message_loop_proxy(base::MessageLoop::current()->task_runner()) {}
+ message_loop_proxy(base::ThreadTaskRunnerHandle::Get()) {}
int32 buffer_size;
int64 start_offset;
diff --git a/cobalt/loader/loader.cc b/cobalt/loader/loader.cc
index 2e768ec..f33aaf9 100644
--- a/cobalt/loader/loader.cc
+++ b/cobalt/loader/loader.cc
@@ -12,14 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <memory>
-
#include "cobalt/loader/loader.h"
+#include <memory>
+#include <utility>
+
#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
namespace cobalt {
namespace loader {
@@ -161,8 +163,8 @@
fetcher_ = fetcher_creator_.Run(fetcher_handler_to_decoder_adaptor_.get());
if (fetcher_) {
- fetcher_->SetLoadTimingInfoCallback(base::Bind(&Loader::set_load_timing_info,
- base::Unretained(this)));
+ fetcher_->SetLoadTimingInfoCallback(
+ base::Bind(&Loader::set_load_timing_info, base::Unretained(this)));
}
// Post the error callback on the current message loop in case the loader is
@@ -171,7 +173,7 @@
fetcher_creator_error_closure_.Reset(
base::Bind(base::Bind(&Loader::LoadComplete, base::Unretained(this)),
std::string("Fetcher was not created.")));
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, fetcher_creator_error_closure_.callback());
}
}
@@ -180,9 +182,7 @@
load_timing_info_ = timing_info;
}
-net::LoadTimingInfo Loader::get_load_timing_info() {
- return load_timing_info_;
-}
+net::LoadTimingInfo Loader::get_load_timing_info() { return load_timing_info_; }
} // namespace loader
} // namespace cobalt
diff --git a/cobalt/loader/loader_factory.cc b/cobalt/loader/loader_factory.cc
index 0cb575e..fcbe2d2 100644
--- a/cobalt/loader/loader_factory.cc
+++ b/cobalt/loader/loader_factory.cc
@@ -31,7 +31,13 @@
debugger_hooks_(debugger_hooks),
resource_provider_(resource_provider) {
if (encoded_image_cache_capacity > 0) {
- fetcher_cache_.reset(new FetcherCache(name, encoded_image_cache_capacity));
+ fetcher_cache_ = new FetcherCache(name, encoded_image_cache_capacity);
+ }
+}
+
+LoaderFactory::~LoaderFactory() {
+ if (fetcher_cache_) {
+ fetcher_cache_->DestroySoon();
}
}
diff --git a/cobalt/loader/loader_factory.h b/cobalt/loader/loader_factory.h
index e3f80ff..7602602 100644
--- a/cobalt/loader/loader_factory.h
+++ b/cobalt/loader/loader_factory.h
@@ -47,6 +47,7 @@
const base::DebuggerHooks& debugger_hooks,
size_t encoded_image_cache_capacity,
base::ThreadPriority loader_thread_priority);
+ ~LoaderFactory();
// Creates a loader that fetches and decodes an image.
std::unique_ptr<Loader> CreateImageLoader(
@@ -108,7 +109,7 @@
// Used to cache the fetched raw data. Note that currently the cache is only
// used to cache Image data. We may introduce more caches once we want to
// cache fetched data for other resource types.
- std::unique_ptr<FetcherCache> fetcher_cache_;
+ scoped_refptr<FetcherCache> fetcher_cache_;
// Used with CLOG to report errors with the image source.
const base::DebuggerHooks& debugger_hooks_;
diff --git a/cobalt/loader/loader_test.cc b/cobalt/loader/loader_test.cc
index 424f402..fb9f281 100644
--- a/cobalt/loader/loader_test.cc
+++ b/cobalt/loader/loader_test.cc
@@ -12,25 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <memory>
-
#include "cobalt/loader/loader.h"
+#include <memory>
+
#include "base/bind.h"
#include "base/files/file_util.h"
#include "base/message_loop/message_loop.h"
#include "base/optional.h"
#include "base/path_service.h"
#include "base/run_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/loader/file_fetcher.h"
#include "cobalt/loader/text_decoder.h"
#include "cobalt/web/url_utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::_;
using ::testing::InSequence;
using ::testing::Invoke;
-using ::testing::_;
namespace cobalt {
namespace loader {
@@ -46,8 +47,8 @@
void OnDone(const Origin&, std::unique_ptr<std::string> text) {
text_ = *text;
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, run_loop_->QuitClosure());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ run_loop_->QuitClosure());
}
std::string text() { return text_; }
@@ -67,8 +68,8 @@
DLOG(ERROR) << *text;
if (run_loop_)
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, run_loop_->QuitClosure());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ run_loop_->QuitClosure());
}
private:
diff --git a/cobalt/loader/net_fetcher.cc b/cobalt/loader/net_fetcher.cc
index 7f43139..d825fc4 100644
--- a/cobalt/loader/net_fetcher.cc
+++ b/cobalt/loader/net_fetcher.cc
@@ -20,6 +20,7 @@
#include "base/memory/ptr_util.h"
#include "base/strings/stringprintf.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/loader/cors_preflight.h"
@@ -104,7 +105,7 @@
request_cross_origin_(false),
origin_(origin),
request_script_(options.resource_type == disk_cache::kUncompiledScript),
- task_runner_(base::MessageLoop::current()->task_runner()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
skip_fetch_intercept_(options.skip_fetch_intercept),
will_destroy_current_message_loop_(false),
main_resource_(main_resource) {
@@ -134,12 +135,13 @@
net::LOAD_DO_NOT_SEND_COOKIES | net::LOAD_DO_NOT_SEND_AUTH_DATA;
url_fetcher_->SetLoadFlags(kDisableCookiesLoadFlags);
}
+ network_module->AddClientHintHeaders(*url_fetcher_);
// Delay the actual start until this function is complete. Otherwise we might
// call handler's callbacks at an unexpected time- e.g. receiving OnError()
// while a loader is still being constructed.
- base::MessageLoop::current()->task_runner()->PostTask(
- FROM_HERE, start_callback_.callback());
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ start_callback_.callback());
base::MessageLoop::current()->AddDestructionObserver(this);
}
@@ -178,7 +180,7 @@
if (will_destroy_current_message_loop_.load()) {
return;
}
- if (task_runner_ != base::MessageLoop::current()->task_runner()) {
+ if (task_runner_ != base::ThreadTaskRunnerHandle::Get()) {
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&NetFetcher::OnFetchIntercepted,
base::Unretained(this), std::move(body)));
diff --git a/cobalt/media/base/drm_system.cc b/cobalt/media/base/drm_system.cc
index f92f1a7..f3d0a1f 100644
--- a/cobalt/media/base/drm_system.cc
+++ b/cobalt/media/base/drm_system.cc
@@ -21,6 +21,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/instance_counter.h"
namespace cobalt {
@@ -85,7 +86,7 @@
key_system, this, OnSessionUpdateRequestGeneratedFunc,
OnSessionUpdatedFunc, OnSessionKeyStatusesChangedFunc,
OnServerCertificateUpdatedFunc, OnSessionClosedFunc)),
- message_loop_(base::MessageLoop::current()->task_runner()),
+ message_loop_(base::ThreadTaskRunnerHandle::Get()),
ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)),
weak_this_(weak_ptr_factory_.GetWeakPtr()) {
ON_INSTANCE_CREATED(DrmSystem);
diff --git a/cobalt/media/base/pipeline.h b/cobalt/media/base/pipeline.h
index ccb3a1b..b930107 100644
--- a/cobalt/media/base/pipeline.h
+++ b/cobalt/media/base/pipeline.h
@@ -98,6 +98,9 @@
const GetDecodeTargetGraphicsContextProviderFunc&
get_decode_target_graphics_context_provider_func,
bool allow_resume_after_suspend, bool allow_batched_sample_write,
+#if SB_API_VERSION >= 15
+ SbTime audio_write_duration_local, SbTime audio_write_duration_remote,
+#endif // SB_API_VERSION >= 15
MediaLog* media_log, DecodeTargetProvider* decode_target_provider);
virtual ~Pipeline() {}
@@ -217,6 +220,9 @@
// be 0.
virtual void GetNaturalVideoSize(gfx::Size* out_size) const = 0;
+ // Gets the names of audio connectors used by the audio output.
+ virtual std::vector<std::string> GetAudioConnectors() const = 0;
+
// Return true if loading progress has been made since the last time this
// method was called.
virtual bool DidLoadingProgress() const = 0;
diff --git a/cobalt/media/base/sbplayer_bridge.cc b/cobalt/media/base/sbplayer_bridge.cc
index 7194560..1fb4c81 100644
--- a/cobalt/media/base/sbplayer_bridge.cc
+++ b/cobalt/media/base/sbplayer_bridge.cc
@@ -65,11 +65,11 @@
SbMediaAudioSampleInfo* sample_info) {
DCHECK(sample_info);
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
sample_info->stream_info = stream_info;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
*sample_info = stream_info;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION}
+#endif // SB_API_VERSION >= 15
}
void SetStreamInfo(
@@ -89,11 +89,11 @@
SbMediaVideoSampleInfo* sample_info) {
DCHECK(sample_info);
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
sample_info->stream_info = stream_info;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
*sample_info = stream_info;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION}
+#endif // SB_API_VERSION >= 15
}
void SetDiscardPadding(
@@ -110,10 +110,10 @@
SbMediaAudioSampleInfo* sample_info) {
DCHECK(sample_info);
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
sample_info->discarded_duration_from_front = discard_padding.first.ToSbTime();
sample_info->discarded_duration_from_back = discard_padding.second.ToSbTime();
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION}
+#endif // SB_API_VERSION >= 15
}
} // namespace
@@ -451,6 +451,37 @@
GetInfo_Locked(video_frames_decoded, video_frames_dropped, media_time);
}
+#if SB_API_VERSION >= 15
+std::vector<SbMediaAudioConfiguration>
+SbPlayerBridge::GetAudioConfigurations() {
+ base::AutoLock auto_lock(lock_);
+
+ if (!SbPlayerIsValid(player_)) {
+ return std::vector<SbMediaAudioConfiguration>();
+ }
+
+ std::vector<SbMediaAudioConfiguration> configurations;
+
+ // Set a limit to avoid infinite loop.
+ constexpr int kMaxAudioConfigurations = 32;
+
+ for (int i = 0; i < kMaxAudioConfigurations; ++i) {
+ SbMediaAudioConfiguration configuration;
+ if (!sbplayer_interface_->GetAudioConfiguration(player_, i,
+ &configuration)) {
+ break;
+ }
+
+ configurations.push_back(configuration);
+ }
+
+ LOG_IF(WARNING, configurations.empty())
+ << "Failed to find any audio configurations.";
+
+ return configurations;
+}
+#endif // SB_API_VERSION >= 15
+
#if SB_HAS(PLAYER_WITH_URL)
void SbPlayerBridge::GetUrlPlayerBufferedTimeRanges(
base::TimeDelta* buffer_start_time, base::TimeDelta* buffer_length_time) {
@@ -491,11 +522,11 @@
DCHECK(SbPlayerIsValid(player_));
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
SbPlayerInfo out_player_info;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
SbPlayerInfo2 out_player_info;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
sbplayer_interface_->GetInfo(player_, &out_player_info);
video_stream_info_.frame_width = out_player_info.frame_width;
@@ -514,11 +545,11 @@
DCHECK(SbPlayerIsValid(player_));
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
SbPlayerInfo info;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
SbPlayerInfo2 info;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
sbplayer_interface_->GetInfo(player_, &info);
if (info.duration == SB_PLAYER_NO_DURATION) {
// URL-based player may not have loaded asset yet, so map no duration to 0.
@@ -536,11 +567,11 @@
DCHECK(SbPlayerIsValid(player_));
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
SbPlayerInfo info;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
SbPlayerInfo2 info;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
sbplayer_interface_->GetInfo(player_, &info);
return base::TimeDelta::FromMicroseconds(info.start_date);
}
@@ -700,22 +731,22 @@
SbPlayerCreationParam creation_param = {};
creation_param.drm_system = drm_system_;
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
creation_param.audio_stream_info = audio_stream_info_;
creation_param.video_stream_info = video_stream_info_;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
creation_param.audio_sample_info = audio_stream_info_;
creation_param.video_sample_info = video_stream_info_;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
// TODO: This is temporary for supporting background media playback.
// Need to be removed with media refactor.
if (!is_visible) {
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
creation_param.video_stream_info.codec = kSbMediaVideoCodecNone;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
creation_param.video_sample_info.codec = kSbMediaVideoCodecNone;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
}
creation_param.output_mode = output_mode_;
DCHECK_EQ(sbplayer_interface_->GetPreferredOutputMode(&creation_param),
@@ -932,11 +963,11 @@
DCHECK(SbPlayerIsValid(player_));
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
SbPlayerInfo info;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
SbPlayerInfo2 info;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
sbplayer_interface_->GetInfo(player_, &info);
if (media_time) {
@@ -1183,13 +1214,13 @@
bool prefer_decode_to_texture) const {
SbPlayerCreationParam creation_param = {};
creation_param.drm_system = drm_system_;
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
creation_param.audio_stream_info = audio_stream_info_;
creation_param.video_stream_info = video_stream_info_;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
creation_param.audio_sample_info = audio_stream_info_;
creation_param.video_sample_info = video_stream_info_;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
// Try to choose |kSbPlayerOutputModeDecodeToTexture| when
// |prefer_decode_to_texture| is true.
diff --git a/cobalt/media/base/sbplayer_bridge.h b/cobalt/media/base/sbplayer_bridge.h
index 3ea0b76..ca7163e 100644
--- a/cobalt/media/base/sbplayer_bridge.h
+++ b/cobalt/media/base/sbplayer_bridge.h
@@ -115,6 +115,9 @@
void SetPlaybackRate(double playback_rate);
void GetInfo(uint32* video_frames_decoded, uint32* video_frames_dropped,
base::TimeDelta* media_time);
+#if SB_API_VERSION >= 15
+ std::vector<SbMediaAudioConfiguration> GetAudioConfigurations();
+#endif // SB_API_VERSION >= 15
#if SB_HAS(PLAYER_WITH_URL)
void GetUrlPlayerBufferedTimeRanges(base::TimeDelta* buffer_start_time,
diff --git a/cobalt/media/base/sbplayer_interface.cc b/cobalt/media/base/sbplayer_interface.cc
index a3777f0..df33c90 100644
--- a/cobalt/media/base/sbplayer_interface.cc
+++ b/cobalt/media/base/sbplayer_interface.cc
@@ -61,11 +61,11 @@
void DefaultSbPlayerInterface::Seek(SbPlayer player, SbTime seek_to_timestamp,
int ticket) {
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
SbPlayerSeek(player, seek_to_timestamp, ticket);
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
SbPlayerSeek2(player, seek_to_timestamp, ticket);
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
}
bool DefaultSbPlayerInterface::IsEnhancedAudioExtensionEnabled() const {
@@ -76,13 +76,13 @@
SbPlayer player, SbMediaType sample_type,
const SbPlayerSampleInfo* sample_infos, int number_of_sample_infos) {
DCHECK(!IsEnhancedAudioExtensionEnabled());
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
SbPlayerWriteSamples(player, sample_type, sample_infos,
number_of_sample_infos);
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
SbPlayerWriteSample2(player, sample_type, sample_infos,
number_of_sample_infos);
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
}
void DefaultSbPlayerInterface::WriteSamples(
@@ -119,13 +119,13 @@
}
void DefaultSbPlayerInterface::GetInfo(SbPlayer player,
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
SbPlayerInfo* out_player_info) {
SbPlayerGetInfo(player, out_player_info);
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
SbPlayerInfo2* out_player_info2) {
SbPlayerGetInfo2(player, out_player_info2);
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
}
SbDecodeTarget DefaultSbPlayerInterface::GetCurrentFrame(SbPlayer player) {
@@ -159,5 +159,15 @@
}
#endif // SB_HAS(PLAYER_WITH_URL)
+#if SB_API_VERSION >= 15
+
+bool DefaultSbPlayerInterface::GetAudioConfiguration(
+ SbPlayer player, int index,
+ SbMediaAudioConfiguration* out_audio_configuration) {
+ return SbPlayerGetAudioConfiguration(player, index, out_audio_configuration);
+}
+
+#endif // SB_API_VERSION >= 15
+
} // namespace media
} // namespace cobalt
diff --git a/cobalt/media/base/sbplayer_interface.h b/cobalt/media/base/sbplayer_interface.h
index 893f1a2..d9a3174 100644
--- a/cobalt/media/base/sbplayer_interface.h
+++ b/cobalt/media/base/sbplayer_interface.h
@@ -59,11 +59,11 @@
virtual bool SetPlaybackRate(SbPlayer player, double playback_rate) = 0;
virtual void SetVolume(SbPlayer player, double volume) = 0;
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
virtual void GetInfo(SbPlayer player, SbPlayerInfo* out_player_info) = 0;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
virtual void GetInfo(SbPlayer player, SbPlayerInfo2* out_player_info2) = 0;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
virtual SbDecodeTarget GetCurrentFrame(SbPlayer player) = 0;
#if SB_HAS(PLAYER_WITH_URL)
@@ -81,6 +81,12 @@
SbPlayer player, SbUrlPlayerExtraInfo* out_url_player_info) = 0;
#endif // SB_HAS(PLAYER_WITH_URL)
+#if SB_API_VERSION >= 15
+ virtual bool GetAudioConfiguration(
+ SbPlayer player, int index,
+ SbMediaAudioConfiguration* out_audio_configuration) = 0;
+#endif // SB_API_VERSION >= 15
+
// disabled by default, but can be enabled via h5vcc setting.
void EnableCValStats(bool should_enable) {
cval_stats_.Enable(should_enable);
@@ -118,11 +124,11 @@
int height) override;
bool SetPlaybackRate(SbPlayer player, double playback_rate) override;
void SetVolume(SbPlayer player, double volume) override;
-#if SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#if SB_API_VERSION >= 15
void GetInfo(SbPlayer player, SbPlayerInfo* out_player_info) override;
-#else // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#else // SB_API_VERSION >= 15
void GetInfo(SbPlayer player, SbPlayerInfo2* out_player_info2) override;
-#endif // SB_API_VERSION >= SB_MEDIA_ENHANCED_AUDIO_API_VERSION
+#endif // SB_API_VERSION >= 15
SbDecodeTarget GetCurrentFrame(SbPlayer player) override;
#if SB_HAS(PLAYER_WITH_URL)
@@ -138,6 +144,12 @@
SbPlayer player, SbUrlPlayerExtraInfo* out_url_player_info) override;
#endif // SB_HAS(PLAYER_WITH_URL)
+#if SB_API_VERSION >= 15
+ bool GetAudioConfiguration(
+ SbPlayer player, int index,
+ SbMediaAudioConfiguration* out_audio_configuration) override;
+#endif // SB_API_VERSION >= 15
+
private:
void (*enhanced_audio_player_write_samples_)(
SbPlayer player, SbMediaType sample_type,
diff --git a/cobalt/media/base/sbplayer_pipeline.cc b/cobalt/media/base/sbplayer_pipeline.cc
index 78d7b6b..9523a9e 100644
--- a/cobalt/media/base/sbplayer_pipeline.cc
+++ b/cobalt/media/base/sbplayer_pipeline.cc
@@ -36,6 +36,7 @@
#include "cobalt/media/base/playback_statistics.h"
#include "cobalt/media/base/sbplayer_bridge.h"
#include "cobalt/media/base/sbplayer_set_bounds_helper.h"
+#include "starboard/common/media.h"
#include "starboard/common/string.h"
#include "starboard/configuration_constants.h"
#include "starboard/time.h"
@@ -66,6 +67,7 @@
using ::media::PipelineStatistics;
using ::media::PipelineStatusCallback;
using ::media::VideoDecoderConfig;
+using ::starboard::GetMediaAudioConnectorName;
static const int kRetryDelayAtSuspendInMilliseconds = 100;
@@ -90,6 +92,37 @@
#endif // SB_HAS(PLAYER_WITH_URL)
};
+#if SB_API_VERSION >= 15
+bool HasRemoteAudioOutputs(
+ const std::vector<SbMediaAudioConfiguration>& configurations) {
+ for (auto&& configuration : configurations) {
+ const auto connector = configuration.connector;
+ switch (connector) {
+ case kSbMediaAudioConnectorUnknown:
+ case kSbMediaAudioConnectorAnalog:
+ case kSbMediaAudioConnectorBuiltIn:
+ case kSbMediaAudioConnectorHdmi:
+ case kSbMediaAudioConnectorSpdif:
+ case kSbMediaAudioConnectorUsb:
+ LOG(INFO) << "Encountered local audio connector: "
+ << GetMediaAudioConnectorName(connector);
+ break;
+ case kSbMediaAudioConnectorBluetooth:
+ case kSbMediaAudioConnectorRemoteWired:
+ case kSbMediaAudioConnectorRemoteWireless:
+ case kSbMediaAudioConnectorRemoteOther:
+ LOG(INFO) << "Encountered remote audio connector: "
+ << GetMediaAudioConnectorName(connector);
+ return true;
+ }
+ }
+
+ LOG(INFO) << "No remote audio outputs found.";
+
+ return false;
+}
+#endif // SB_API_VERSION >= 15
+
// SbPlayerPipeline is a PipelineBase implementation that uses the SbPlayer
// interface internally.
class MEDIA_EXPORT SbPlayerPipeline : public Pipeline,
@@ -103,6 +136,9 @@
const GetDecodeTargetGraphicsContextProviderFunc&
get_decode_target_graphics_context_provider_func,
bool allow_resume_after_suspend, bool allow_batched_sample_write,
+#if SB_API_VERSION >= 15
+ SbTime audio_write_duration_local, SbTime audio_write_duration_remote,
+#endif // SB_API_VERSION >= 15
MediaLog* media_log, DecodeTargetProvider* decode_target_provider);
~SbPlayerPipeline() override;
@@ -148,6 +184,7 @@
TimeDelta GetMediaStartDate() const override;
#endif // SB_HAS(PLAYER_WITH_URL)
void GetNaturalVideoSize(gfx::Size* out_size) const override;
+ std::vector<std::string> GetAudioConnectors() const override;
bool DidLoadingProgress() const override;
PipelineStatistics GetStatistics() const override;
@@ -322,19 +359,31 @@
DecodeTargetProvider* decode_target_provider_;
+#if SB_API_VERSION >= 15
+ const SbTime audio_write_duration_local_;
+ const SbTime audio_write_duration_remote_;
+
+ // The two variables below should always contain the same value. They are
+ // kept as separate variables so the existing implementation can stay as is,
+ // which simplifies maintaining it across multiple Starboard versions.
+ SbTime audio_write_duration_ = 0;
+ SbTime audio_write_duration_for_preroll_ = audio_write_duration_;
+#else // SB_API_VERSION >= 15
// Read audio from the stream if |timestamp_of_last_written_audio_| is less
- // than |seek_time_| + |kAudioPrerollLimit|, this effectively allows 10
- // seconds of audio to be written to the SbPlayer after playback startup or
- // seek.
- static const SbTime kAudioPrerollLimit = 10 * kSbTimeSecond;
- // Don't read audio from the stream more than |kAudioLimit| ahead of the
- // current media time during playing.
- static const SbTime kAudioLimit = kSbTimeSecond;
+ // than |seek_time_| + |audio_write_duration_for_preroll_|; this effectively
+ // allows 10 seconds of audio to be written to the SbPlayer after playback
+ // startup or seek.
+ SbTime audio_write_duration_for_preroll_ = 10 * kSbTimeSecond;
+ // Don't read audio from the stream more than |audio_write_duration_| ahead of
+ // the current media time during playing.
+ SbTime audio_write_duration_ = kSbTimeSecond;
+#endif // SB_API_VERSION >= 15
// Only call GetMediaTime() from OnNeedData if it has been
// |kMediaTimeCheckInterval| since the last call to GetMediaTime().
static const SbTime kMediaTimeCheckInterval = 0.1 * kSbTimeSecond;
// Timestamp for the last written audio.
SbTime timestamp_of_last_written_audio_ = 0;
+
// Last media time reported by GetMediaTime().
base::CVal<SbTime> last_media_time_;
// Time when we last checked the media time.
@@ -359,6 +408,9 @@
const GetDecodeTargetGraphicsContextProviderFunc&
get_decode_target_graphics_context_provider_func,
bool allow_resume_after_suspend, bool allow_batched_sample_write,
+#if SB_API_VERSION >= 15
+ SbTime audio_write_duration_local, SbTime audio_write_duration_remote,
+#endif // SB_API_VERSION >= 15
MediaLog* media_log, DecodeTargetProvider* decode_target_provider)
: pipeline_identifier_(
base::StringPrintf("%X", g_pipeline_identifier_counter++)),
@@ -400,6 +452,10 @@
kSbPlayerStateInitialized,
"The underlying SbPlayer state of the media pipeline."),
decode_target_provider_(decode_target_provider),
+#if SB_API_VERSION >= 15
+ audio_write_duration_local_(audio_write_duration_local),
+ audio_write_duration_remote_(audio_write_duration_remote),
+#endif // SB_API_VERSION >= 15
last_media_time_(base::StringPrintf("Media.Pipeline.%s.LastMediaTime",
pipeline_identifier_.c_str()),
0, "Last media time reported by the underlying player."),
@@ -408,7 +464,12 @@
pipeline_identifier_.c_str()),
"", "The max video capabilities required for the media pipeline."),
playback_statistics_(pipeline_identifier_) {
- SbMediaSetAudioWriteDuration(kAudioLimit);
+#if SB_API_VERSION < 15
+ SbMediaSetAudioWriteDuration(audio_write_duration_);
+ LOG(INFO) << "Setting audio write duration to " << audio_write_duration_
+ << ", the duration during preroll is "
+ << audio_write_duration_for_preroll_;
+#endif // SB_API_VERSION < 15
}
SbPlayerPipeline::~SbPlayerPipeline() { DCHECK(!player_bridge_); }
@@ -790,6 +851,26 @@
*out_size = natural_size_;
}
+std::vector<std::string> SbPlayerPipeline::GetAudioConnectors() const {
+#if SB_API_VERSION >= 15
+ base::AutoLock auto_lock(lock_);
+ if (!player_bridge_) {
+ return std::vector<std::string>();
+ }
+
+ std::vector<std::string> connectors;
+
+ auto configurations = player_bridge_->GetAudioConfigurations();
+ for (auto&& configuration : configurations) {
+ connectors.push_back(GetMediaAudioConnectorName(configuration.connector));
+ }
+
+ return connectors;
+#else // SB_API_VERSION >= 15
+ return std::vector<std::string>();
+#endif // SB_API_VERSION >= 15
+}
+
bool SbPlayerPipeline::DidLoadingProgress() const {
base::AutoLock auto_lock(lock_);
bool ret = did_loading_progress_;
@@ -1045,6 +1126,18 @@
*decode_to_texture_output_mode_, decode_target_provider_,
max_video_capabilities_, pipeline_identifier_));
if (player_bridge_->IsValid()) {
+#if SB_API_VERSION >= 15
+ // TODO(b/267678497): When `player_bridge_->GetAudioConfigurations()`
+ // returns no audio configurations, update the write durations again
+ // before the SbPlayer reaches `kSbPlayerStatePresenting`.
+ audio_write_duration_for_preroll_ = audio_write_duration_ =
+ HasRemoteAudioOutputs(player_bridge_->GetAudioConfigurations())
+ ? audio_write_duration_remote_
+ : audio_write_duration_local_;
+ LOG(INFO) << "SbPlayerBridge created, with audio write duration at "
+ << audio_write_duration_for_preroll_;
+#endif // SB_API_VERSION >= 15
+
SetPlaybackRateTask(playback_rate_);
SetVolumeTask(volume_);
} else {
@@ -1278,17 +1371,18 @@
GetMediaTime();
}
- // Delay reading audio more than |kAudioLimit| ahead of playback after the
- // player has received enough audio for preroll, taking into account that
- // our estimate of playback time might be behind by
+ // Delay reading audio more than |audio_write_duration_| ahead of playback
+ // after the player has received enough audio for preroll, taking into
+ // account that our estimate of playback time might be behind by
// |kMediaTimeCheckInterval|.
if (timestamp_of_last_written_audio_ - seek_time_.ToSbTime() >
- kAudioPrerollLimit) {
+ audio_write_duration_for_preroll_) {
// The estimated time ahead of playback may be negative if no audio has
// been written.
SbTime time_ahead_of_playback =
timestamp_of_last_written_audio_ - last_media_time_;
- if (time_ahead_of_playback > (kAudioLimit + kMediaTimeCheckInterval)) {
+ if (time_ahead_of_playback >
+ (audio_write_duration_ + kMediaTimeCheckInterval)) {
task_runner_->PostDelayedTask(
FROM_HERE,
base::Bind(&SbPlayerPipeline::DelayedNeedData, this, max_buffers),
@@ -1358,6 +1452,14 @@
playback_statistics_.OnPresenting(
video_stream_->video_decoder_config());
}
+#if SB_API_VERSION >= 15
+ audio_write_duration_for_preroll_ = audio_write_duration_ =
+ HasRemoteAudioOutputs(player_bridge_->GetAudioConfigurations())
+ ? audio_write_duration_remote_
+ : audio_write_duration_local_;
+ LOG(INFO) << "SbPlayerBridge reaches kSbPlayerStatePresenting, with audio"
+ << " write duration at " << audio_write_duration_;
+#endif // SB_API_VERSION >= 15
break;
}
case kSbPlayerStateEndOfStream:
@@ -1612,12 +1714,18 @@
const GetDecodeTargetGraphicsContextProviderFunc&
get_decode_target_graphics_context_provider_func,
bool allow_resume_after_suspend, bool allow_batched_sample_write,
+#if SB_API_VERSION >= 15
+ SbTime audio_write_duration_local, SbTime audio_write_duration_remote,
+#endif // SB_API_VERSION >= 15
MediaLog* media_log, DecodeTargetProvider* decode_target_provider) {
- return new SbPlayerPipeline(interface, window, task_runner,
- get_decode_target_graphics_context_provider_func,
- allow_resume_after_suspend,
- allow_batched_sample_write, media_log,
- decode_target_provider);
+ return new SbPlayerPipeline(
+ interface, window, task_runner,
+ get_decode_target_graphics_context_provider_func,
+ allow_resume_after_suspend, allow_batched_sample_write,
+#if SB_API_VERSION >= 15
+ audio_write_duration_local, audio_write_duration_remote,
+#endif // SB_API_VERSION >= 15
+ media_log, decode_target_provider);
}
} // namespace media
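Taken together, the pipeline changes above replace the fixed kAudioLimit/kAudioPrerollLimit constants with durations chosen at runtime: on Starboard 15+ the values come from the platform and switch to the remote duration when HasRemoteAudioOutputs() sees a Bluetooth or other remote connector, while older Starboard versions keep the previous 1-second/10-second behavior. The gating itself is unchanged; a simplified, self-contained sketch of the decision made in OnNeedData might look like the following (the free function and parameter names are hypothetical, and durations are in microseconds to match SbTime).

// Simplified sketch of the audio read gating in SbPlayerPipeline::OnNeedData;
// names here are hypothetical, not the production symbols.
#include <cstdint>

using MicroSeconds = int64_t;

constexpr MicroSeconds kSecond = 1'000'000;
constexpr MicroSeconds kMediaTimeCheckInterval = kSecond / 10;

// Returns true when the pipeline should hold off reading more audio: preroll
// is satisfied and the written audio is already far enough ahead of playback.
bool ShouldDelayAudioRead(MicroSeconds last_written_audio_timestamp,
                          MicroSeconds seek_time,
                          MicroSeconds last_media_time,
                          MicroSeconds write_duration_for_preroll,
                          MicroSeconds write_duration) {
  if (last_written_audio_timestamp - seek_time <= write_duration_for_preroll) {
    return false;  // Still prerolling; keep writing audio.
  }
  // May be negative if no audio has been written yet.
  const MicroSeconds time_ahead_of_playback =
      last_written_audio_timestamp - last_media_time;
  return time_ahead_of_playback > write_duration + kMediaTimeCheckInterval;
}

int main() {
  // Example: 12s of audio written, seek at 0, media time 2s, 10s preroll
  // limit, 1s steady-state limit -> 10s ahead > 1.1s, so delay further reads.
  return ShouldDelayAudioRead(12 * kSecond, 0, 2 * kSecond, 10 * kSecond,
                              kSecond)
             ? 0
             : 1;
}

ShouldDelayAudioRead() returning true corresponds to the branch that posts DelayedNeedData instead of reading more audio immediately. With SB_API_VERSION >= 15 the two duration arguments default to kSbPlayerWriteDurationLocal/kSbPlayerWriteDurationRemote and can be overridden through the AudioWriteDurationLocal/AudioWriteDurationRemote settings added in media_module.cc below.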
diff --git a/cobalt/media/media_module.cc b/cobalt/media/media_module.cc
index 95965a5..00db9fe 100644
--- a/cobalt/media/media_module.cc
+++ b/cobalt/media/media_module.cc
@@ -194,6 +194,18 @@
LOG(INFO) << (value ? "Enabling" : "Disabling")
<< " media metrics collection.";
return true;
+#if SB_API_VERSION >= 15
+ } else if (name == "AudioWriteDurationLocal" && value > 0) {
+ audio_write_duration_local_ = value;
+ LOG(INFO) << "Set AudioWriteDurationLocal to "
+ << audio_write_duration_local_;
+ return true;
+ } else if (name == "AudioWriteDurationRemote" && value > 0) {
+ audio_write_duration_remote_ = value;
+ LOG(INFO) << "Set AudioWriteDurationRemote to "
+ << audio_write_duration_remote_;
+ return true;
+#endif // SB_API_VERSION >= 15
}
return false;
}
@@ -211,7 +223,11 @@
base::Bind(&MediaModule::GetSbDecodeTargetGraphicsContextProvider,
base::Unretained(this)),
client, this, options_.allow_resume_after_suspend,
- allow_batched_sample_write_, &media_log_));
+ allow_batched_sample_write_,
+#if SB_API_VERSION >= 15
+ audio_write_duration_local_, audio_write_duration_remote_,
+#endif // SB_API_VERSION >= 15
+ &media_log_));
}
void MediaModule::Suspend() {
diff --git a/cobalt/media/media_module.h b/cobalt/media/media_module.h
index 6ca47b8..59a753e 100644
--- a/cobalt/media/media_module.h
+++ b/cobalt/media/media_module.h
@@ -35,6 +35,7 @@
#include "cobalt/render_tree/resource_provider.h"
#include "cobalt/system_window/system_window.h"
#include "starboard/common/mutex.h"
+#include "starboard/player.h"
#include "third_party/chromium/media/base/media_log.h"
namespace cobalt {
@@ -123,6 +124,11 @@
bool allow_batched_sample_write_ = false;
+#if SB_API_VERSION >= 15
+ SbTime audio_write_duration_local_ = kSbPlayerWriteDurationLocal;
+ SbTime audio_write_duration_remote_ = kSbPlayerWriteDurationRemote;
+#endif // SB_API_VERSION >= 15
+
DecoderBufferAllocator decoder_buffer_allocator_;
};
diff --git a/cobalt/media/player/web_media_player.h b/cobalt/media/player/web_media_player.h
index 1322473..459ec6e 100644
--- a/cobalt/media/player/web_media_player.h
+++ b/cobalt/media/player/web_media_player.h
@@ -130,6 +130,9 @@
virtual int GetNaturalWidth() const = 0;
virtual int GetNaturalHeight() const = 0;
+ // Names of audio connectors used by the playback.
+ virtual std::vector<std::string> GetAudioConnectors() const = 0;
+
// Getters of playback state.
virtual bool IsPaused() const = 0;
virtual bool IsSeeking() const = 0;
diff --git a/cobalt/media/player/web_media_player_impl.cc b/cobalt/media/player/web_media_player_impl.cc
index 8073366..b17f82e 100644
--- a/cobalt/media/player/web_media_player_impl.cc
+++ b/cobalt/media/player/web_media_player_impl.cc
@@ -118,6 +118,9 @@
get_decode_target_graphics_context_provider_func,
WebMediaPlayerClient* client, WebMediaPlayerDelegate* delegate,
bool allow_resume_after_suspend, bool allow_batched_sample_write,
+#if SB_API_VERSION >= 15
+ SbTime audio_write_duration_local, SbTime audio_write_duration_remote,
+#endif // SB_API_VERSION >= 15
::media::MediaLog* const media_log)
: pipeline_thread_("media_pipeline"),
network_state_(WebMediaPlayer::kNetworkStateEmpty),
@@ -146,6 +149,9 @@
Pipeline::Create(interface, window, pipeline_thread_.task_runner(),
get_decode_target_graphics_context_provider_func,
allow_resume_after_suspend_, allow_batched_sample_write_,
+#if SB_API_VERSION >= 15
+ audio_write_duration_local, audio_write_duration_remote,
+#endif // SB_API_VERSION >= 15
media_log_, decode_target_provider_.get());
// Also we want to be notified of |main_loop_| destruction.
@@ -428,6 +434,11 @@
return size.height();
}
+std::vector<std::string> WebMediaPlayerImpl::GetAudioConnectors() const {
+ DCHECK_EQ(main_loop_, base::MessageLoop::current());
+ return pipeline_->GetAudioConnectors();
+}
+
bool WebMediaPlayerImpl::IsPaused() const {
DCHECK_EQ(main_loop_, base::MessageLoop::current());
diff --git a/cobalt/media/player/web_media_player_impl.h b/cobalt/media/player/web_media_player_impl.h
index b64f318..4f0c6a9 100644
--- a/cobalt/media/player/web_media_player_impl.h
+++ b/cobalt/media/player/web_media_player_impl.h
@@ -110,6 +110,10 @@
WebMediaPlayerDelegate* delegate,
bool allow_resume_after_suspend,
bool allow_batched_sample_write,
+#if SB_API_VERSION >= 15
+ SbTime audio_write_duration_local,
+ SbTime audio_write_duration_remote,
+#endif // SB_API_VERSION >= 15
::media::MediaLog* const media_log);
~WebMediaPlayerImpl() override;
@@ -146,6 +150,9 @@
int GetNaturalWidth() const override;
int GetNaturalHeight() const override;
+ // Names of audio connectors used by the playback.
+ std::vector<std::string> GetAudioConnectors() const override;
+
// Getters of playback state.
bool IsPaused() const override;
bool IsSeeking() const override;
diff --git a/cobalt/media/progressive/demuxer_fuzzer.cc b/cobalt/media/progressive/demuxer_fuzzer.cc
index d7be774..5e1755a 100644
--- a/cobalt/media/progressive/demuxer_fuzzer.cc
+++ b/cobalt/media/progressive/demuxer_fuzzer.cc
@@ -19,6 +19,7 @@
#include "base/compiler_specific.h"
#include "base/files/file_path.h"
#include "base/memory/ref_counted.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/wrap_main.h"
#include "cobalt/media/base/bind_to_loop.h"
#include "cobalt/media/base/pipeline_status.h"
@@ -45,9 +46,8 @@
public:
explicit DemuxerFuzzer(const std::vector<uint8>& content)
: error_occurred_(false), eos_count_(0), stopped_(false) {
- demuxer_ =
- new ProgressiveDemuxer(base::MessageLoop::current()->task_runner(),
- new InMemoryDataSource(content));
+ demuxer_ = new ProgressiveDemuxer(base::ThreadTaskRunnerHandle::Get(),
+ new InMemoryDataSource(content));
}
void Fuzz() {
diff --git a/cobalt/media/progressive/progressive_demuxer.cc b/cobalt/media/progressive/progressive_demuxer.cc
index 3a30c8d..932b45e 100644
--- a/cobalt/media/progressive/progressive_demuxer.cc
+++ b/cobalt/media/progressive/progressive_demuxer.cc
@@ -24,6 +24,7 @@
#include "base/message_loop/message_loop.h"
#include "base/strings/stringprintf.h"
#include "base/task_runner_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/media/base/data_source.h"
@@ -409,7 +410,7 @@
}
void ProgressiveDemuxer::Download(scoped_refptr<DecoderBuffer> buffer) {
- DCHECK(blocking_thread_.task_runner()->BelongsToCurrentThread());
+ DCHECK(base::ThreadTaskRunnerHandle::Get()->BelongsToCurrentThread());
// We need a requested_au_ or to have canceled this request and
// are buffering to a new location for this to make sense
DCHECK(requested_au_);
@@ -464,7 +465,7 @@
// Notify host of each disjoint range.
host_->OnBufferedTimeRangesChanged(buffered);
- blocking_thread_.task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&ProgressiveDemuxer::IssueNextRequest,
base::Unretained(this)));
}
@@ -516,7 +517,7 @@
// We cannot call Request() directly even if this function is also run on
// |blocking_thread_| as otherwise it is possible that this function is
// running in a tight loop and seek or stop request has no chance to kick in.
- blocking_thread_.task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&ProgressiveDemuxer::Request, base::Unretained(this), type));
}
diff --git a/cobalt/media/progressive/progressive_parser.cc b/cobalt/media/progressive/progressive_parser.cc
index 771e386..a7b5b52 100644
--- a/cobalt/media/progressive/progressive_parser.cc
+++ b/cobalt/media/progressive/progressive_parser.cc
@@ -35,8 +35,8 @@
DCHECK(media_log);
*parser = NULL;
- // download first 16 bytes of stream to determine file type and extract basic
- // container-specific stream configuration information
+ // download first kInitialHeaderSize bytes of stream to determine file type
+ // and extract basic container-specific stream configuration information
uint8 header[kInitialHeaderSize];
int bytes_read = reader->BlockingRead(0, kInitialHeaderSize, header);
if (bytes_read != kInitialHeaderSize) {
diff --git a/cobalt/media/sandbox/web_media_player_helper.cc b/cobalt/media/sandbox/web_media_player_helper.cc
index ab5c080..c2364a5 100644
--- a/cobalt/media/sandbox/web_media_player_helper.cc
+++ b/cobalt/media/sandbox/web_media_player_helper.cc
@@ -17,6 +17,7 @@
#include <memory>
#include <utility>
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/media/file_data_source.h"
#include "cobalt/media/url_fetcher_data_source.h"
#include "third_party/chromium/media/cobalt/ui/gfx/geometry/rect.h"
@@ -88,9 +89,9 @@
player_->LoadProgressive(video_url, std::move(data_source));
} else {
std::unique_ptr<DataSource> data_source(new URLFetcherDataSource(
- base::MessageLoop::current()->task_runner(), video_url,
- csp::SecurityCallback(), fetcher_factory->network_module(),
- loader::kNoCORSMode, loader::Origin()));
+ base::ThreadTaskRunnerHandle::Get(), video_url, csp::SecurityCallback(),
+ fetcher_factory->network_module(), loader::kNoCORSMode,
+ loader::Origin()));
player_->LoadProgressive(video_url, std::move(data_source));
}
diff --git a/cobalt/media/url_fetcher_data_source.cc b/cobalt/media/url_fetcher_data_source.cc
index 4f25520..a52e1fb 100644
--- a/cobalt/media/url_fetcher_data_source.cc
+++ b/cobalt/media/url_fetcher_data_source.cc
@@ -340,6 +340,7 @@
std::move(net::URLFetcher::Create(url_, net::URLFetcher::GET, this));
fetcher_->SetRequestContext(
network_module_->url_request_context_getter().get());
+ network_module_->AddClientHintHeaders(*fetcher_);
std::unique_ptr<loader::URLFetcherStringWriter> download_data_writer(
new loader::URLFetcherStringWriter());
fetcher_->SaveResponseWithWriter(std::move(download_data_writer));
diff --git a/cobalt/media_capture/media_devices.cc b/cobalt/media_capture/media_devices.cc
index e754755..7a75b18 100644
--- a/cobalt/media_capture/media_devices.cc
+++ b/cobalt/media_capture/media_devices.cc
@@ -18,6 +18,7 @@
#include <string>
#include <utility>
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/media_capture/media_device_info.h"
#include "cobalt/media_stream/media_stream.h"
@@ -175,7 +176,7 @@
void MediaDevices::OnMicrophoneStopped() {
if (javascript_message_loop_->task_runner() !=
- base::MessageLoop::current()->task_runner()) {
+ base::ThreadTaskRunnerHandle::Get()) {
javascript_message_loop_->task_runner()->PostTask(
FROM_HERE, base::Bind(&MediaDevices::OnMicrophoneStopped, weak_this_));
return;
@@ -195,7 +196,7 @@
void MediaDevices::OnMicrophoneSuccess() {
if (javascript_message_loop_->task_runner() !=
- base::MessageLoop::current()->task_runner()) {
+ base::ThreadTaskRunnerHandle::Get()) {
javascript_message_loop_->task_runner()->PostTask(
FROM_HERE, base::Bind(&MediaDevices::OnMicrophoneSuccess, this));
return;
diff --git a/cobalt/media_capture/media_recorder.cc b/cobalt/media_capture/media_recorder.cc
index 7e868a5..05d7851 100644
--- a/cobalt/media_capture/media_recorder.cc
+++ b/cobalt/media_capture/media_recorder.cc
@@ -25,6 +25,7 @@
#include "base/message_loop/message_loop.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_util_starboard.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/base/tokens.h"
#include "cobalt/media_capture/blob_event.h"
@@ -232,7 +233,7 @@
: web::EventTarget(settings),
settings_(settings),
stream_(stream),
- javascript_message_loop_(base::MessageLoop::current()->task_runner()),
+ javascript_message_loop_(base::ThreadTaskRunnerHandle::Get()),
ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)),
weak_this_(weak_ptr_factory_.GetWeakPtr()) {
DCHECK(settings);
diff --git a/cobalt/media_integration_tests/endurance/endurance_test.py b/cobalt/media_integration_tests/endurance/endurance_test.py
index 213ae65..a5492b98d 100644
--- a/cobalt/media_integration_tests/endurance/endurance_test.py
+++ b/cobalt/media_integration_tests/endurance/endurance_test.py
@@ -40,7 +40,7 @@
"""
def __init__(self, *args, **kwargs):
- super(EnduranceTest, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
parser = argparse.ArgumentParser()
parser.add_argument(
@@ -129,19 +129,19 @@
def GenerateErrorString(self, err_msg):
return (
- '%s (running time: %f, player identifier: "%s", is playing: %r, '
- 'playback start time: %f, last media time: %f (updated at %f), '
- 'last written audio timestamp: %d (updated at %f), '
- 'last written video timestamp: %d (updated at %f), '
- 'audio eos written at %f, video eos written at %f, '
- 'playback ended at %f).' %
- (err_msg, time.time() - self.start_time, self.player_identifier,
- self.playback_is_playing, self.playback_start_time,
- self.last_media_time, self.last_media_time_update_time,
- self.last_written_audio_timestamp, self.last_written_audio_update_time,
- self.last_written_video_timestamp, self.last_written_video_update_time,
- self.audio_eos_written_time, self.video_eos_written_time,
- self.playback_end_time))
+ f'{err_msg} (running time: {time.time() - self.start_time}, '
+ f'player identifier: "{self.player_identifier}", '
+ f'is playing: {self.playback_is_playing}, '
+ f'playback start time: {self.playback_start_time}, '
+ f'last media time: {self.last_media_time} '
+ f'(updated at {self.last_media_time_update_time}), '
+ f'last written audio timestamp: {self.last_written_audio_timestamp} '
+ f'(updated at {self.last_written_audio_update_time}), '
+ f'last written video timestamp: {self.last_written_video_timestamp} '
+ f'(updated at {self.last_written_video_update_time}), '
+ f'audio eos written at {self.audio_eos_written_time}, '
+ f'video eos written at {self.video_eos_written_time}, '
+ f'playback ended at {self.playback_end_time}).')
def SendRandomAction(self, app):
@@ -195,8 +195,8 @@
current_running_time - self.last_state_check_time <
PLAYER_INITIALIZATION_WAITING_TIMEOUT,
self.GenerateErrorString(
- 'Timed out waiting for player initialization (waited: %f).' %
- (current_running_time - self.last_state_check_time)))
+ 'Timed out waiting for player initialization (waited: '
+ f'{current_running_time - self.last_state_check_time}).'))
continue
self.last_state_check_time = current_running_time
# Skip to next playback if it has been played for long time.
@@ -215,8 +215,8 @@
current_running_time - self.last_media_time_update_time <
MEDIA_TIME_UPDATE_WAITING_TIMEOUT,
self.GenerateErrorString(
- 'Timed out waiting for media time update (waited: %f).' %
- (current_running_time - self.last_media_time_update_time)))
+ 'Timed out waiting for media time update (waited: '
+ f'{current_running_time - self.last_media_time_update_time}).')) # pylint: disable=line-too-long
# Check written audio timestamp.
if (self.last_written_audio_update_time > 0 and
self.audio_eos_written_time == -1):
@@ -224,9 +224,8 @@
current_running_time - self.last_written_audio_update_time <
WRITTEN_INPUT_WAITING_TIMEOUT,
self.GenerateErrorString(
- 'Timed out waiting for new audio input (waited: %f).' %
- (current_running_time -
- self.last_written_audio_update_time)))
+ 'Timed out waiting for new audio input (waited: '
+ f'{current_running_time - self.last_written_audio_update_time}).')) # pylint: disable=line-too-long
# Check written video timestamp.
if (self.last_written_video_update_time > 0 and
self.video_eos_written_time == -1):
@@ -234,9 +233,8 @@
current_running_time - self.last_written_video_update_time <
WRITTEN_INPUT_WAITING_TIMEOUT,
self.GenerateErrorString(
- 'Timed out waiting for new video input (waited: %f).' %
- (current_running_time -
- self.last_written_video_update_time)))
+ 'Timed out waiting for new video input (waited: '
+ f'{current_running_time - self.last_written_video_update_time}).')) # pylint: disable=line-too-long
# Check if the playback ends properly.
if (self.audio_eos_written_time > 0 and
self.video_eos_written_time > 0 and self.playback_end_time > 0):
@@ -244,8 +242,8 @@
current_running_time - self.playback_end_time <
PLAYBACK_END_WAITING_TIMEOUT,
self.GenerateErrorString(
- 'Timed out waiting for playback to end (waited: %f).' %
- (current_running_time - self.playback_end_time,)))
+ 'Timed out waiting for playback to end (waited: '
+ f'{current_running_time - self.playback_end_time}).'))
# Send random actions.
if (self.needs_random_action and
diff --git a/cobalt/media_integration_tests/performance/codec_capability.py b/cobalt/media_integration_tests/performance/codec_capability.py
index 4178d07..c843b17 100644
--- a/cobalt/media_integration_tests/performance/codec_capability.py
+++ b/cobalt/media_integration_tests/performance/codec_capability.py
@@ -32,8 +32,8 @@
for res_name, _ in reversed(MimeStrings.RESOLUTIONS.items()):
if app.IsMediaTypeSupported(
MimeStrings.create_video_mime_string(codec_mime, res_name)):
- return '[%s, %s]' % (codec_name, res_name)
- return '[%s, n/a]' % (codec_name)
+ return f'[{codec_name}, {res_name}]'
+ return f'[{codec_name}, n/a]'
# Returns a string which shows the max supported channels, or "n/a" if the
# codec is not supported.
@@ -43,8 +43,8 @@
for channels in [6, 4, 2]:
if app.IsMediaTypeSupported(
MimeStrings.create_audio_mime_string(codec_mime, channels)):
- return '[%s, %s]' % (codec_name, channels)
- return '[%s, n/a]' % (codec_name)
+ return f'[{codec_name}, {channels}]'
+ return f'[{codec_name}, n/a]'
def test_video_codec_capability(self):
app = self.CreateCobaltApp(PlaybackUrls.DEFAULT)
diff --git a/cobalt/media_integration_tests/performance/dropped_frames.py b/cobalt/media_integration_tests/performance/dropped_frames.py
index bcd0215..1b2a2b3 100644
--- a/cobalt/media_integration_tests/performance/dropped_frames.py
+++ b/cobalt/media_integration_tests/performance/dropped_frames.py
@@ -26,7 +26,7 @@
"""
def __init__(self, *args, **kwargs):
- super(DroppedFrameTest, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
parser = argparse.ArgumentParser()
parser.add_argument('--test_times', default=5, type=int)
diff --git a/cobalt/media_integration_tests/performance/start_latency.py b/cobalt/media_integration_tests/performance/start_latency.py
index cb3f644..54df143 100644
--- a/cobalt/media_integration_tests/performance/start_latency.py
+++ b/cobalt/media_integration_tests/performance/start_latency.py
@@ -28,7 +28,7 @@
"""
def __init__(self, *args, **kwargs):
- super(StartLatencyTest, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
parser = argparse.ArgumentParser()
parser.add_argument('--test_times', default=5, type=int)
@@ -145,12 +145,12 @@
]
for name, playback_url, mime_str in TEST_PARAMETERS:
- TestCase.CreateTest(StartLatencyTest, 'first_start_latency_%s' % (name),
+ TestCase.CreateTest(StartLatencyTest, f'first_start_latency_{name}',
StartLatencyTest.run_first_start_latency_test, name,
playback_url, mime_str)
- TestCase.CreateTest(StartLatencyTest, 'play_pause_latency_%s' % (name),
+ TestCase.CreateTest(StartLatencyTest, f'play_pause_latency_{name}',
StartLatencyTest.run_play_pause_latency_test, name,
playback_url, mime_str)
- TestCase.CreateTest(StartLatencyTest, 'fastforward_latency_%s' % (name),
+ TestCase.CreateTest(StartLatencyTest, f'fastforward_latency_{name}',
StartLatencyTest.run_fastforward_latency_test, name,
playback_url, mime_str)
diff --git a/cobalt/media_integration_tests/test_app.py b/cobalt/media_integration_tests/test_app.py
index ba65c1d..60e1042 100644
--- a/cobalt/media_integration_tests/test_app.py
+++ b/cobalt/media_integration_tests/test_app.py
@@ -30,7 +30,7 @@
WAIT_UNTIL_ADS_END_DEFAULT_TIMEOUT_SECONDS = 120
WAIT_UNTIL_MEDIA_TIME_REACHED_DEFAULT_TIMEOUT_SECONDS = 30
-ACCOUNT_SELECTOR_ADD_ACCOUNT_TEXT = u'Add account'
+ACCOUNT_SELECTOR_ADD_ACCOUNT_TEXT = 'Add account'
def GetValueFromQueryResult(query_result, key, default):
@@ -61,13 +61,13 @@
try:
# Try to parse numbers and booleans.
parsed_value = json.loads(value)
- except ValueError:
- raise RuntimeError('Failed to parse query result.')
+ except ValueError as e:
+ raise RuntimeError('Failed to parse query result.') from e
return parsed_value
- raise NotImplementedError('Convertion from (%s) to (%s) is not supported.' %
- (value_type, default_value_type))
+ raise NotImplementedError(f'Conversion from ({value_type}) to '
+ f'({default_value_type}) is not supported.')
class AdditionalKeys():
@@ -75,12 +75,12 @@
Set of special keys codes for media control, corresponding to cobalt
webdriver AdditionalSpecialKey.
"""
- MEDIA_NEXT_TRACK = u'\uf000'
- MEDIA_PREV_TRACK = u'\uf001'
- MEDIA_STOP = u'\uf002'
- MEDIA_PLAY_PAUSE = u'\uf003'
- MEDIA_REWIND = u'\uf004'
- MEDIA_FAST_FORWARD = u'\uf005'
+ MEDIA_NEXT_TRACK = '\uf000'
+ MEDIA_PREV_TRACK = '\uf001'
+ MEDIA_STOP = '\uf002'
+ MEDIA_PLAY_PAUSE = '\uf003'
+ MEDIA_REWIND = '\uf004'
+ MEDIA_FAST_FORWARD = '\uf005'
class Features():
@@ -164,7 +164,7 @@
def __init__(self, query_result=None):
# If there's no player existing, the query return unicode code "null".
- if (not query_result is None and not query_result == u'null' and
+ if (not query_result is None and not query_result == 'null' and
not isinstance(query_result, dict)):
raise NotImplementedError
@@ -311,7 +311,7 @@
if string == 'playing':
return 2
raise NotImplementedError(
- '"%s" is not a valid media session playback state.' % string)
+ f'"{string}" is not a valid media session playback state.')
class MediaSessionState():
@@ -364,7 +364,7 @@
def __init__(self, query_result=None):
# If there's no player existing, the query return unicode code "null".
- if (not query_result is None and not query_result == u'null' and
+ if (not query_result is None and not query_result == 'null' and
not isinstance(query_result, dict)):
raise NotImplementedError
@@ -543,7 +543,8 @@
try:
result = self.runner.webdriver.execute_script(script)
except Exception as e: # pylint: disable=broad-except
- raise RuntimeError('Fail to execute script with error (%s).' % (str(e)))
+ raise RuntimeError('Failed to execute script with error '
+ f'({str(e)}).') from e
return result
def _OnNewLogLine(self, line):
@@ -682,8 +683,8 @@
time.sleep(WAIT_INTERVAL_SECONDS)
execute_interval = time.time() - start_time
if execute_interval > timeout:
- raise RuntimeError('WaitUntilReachState timed out after (%f) seconds.' %
- (execute_interval))
+ raise RuntimeError('WaitUntilReachState timed out after '
+ f'({execute_interval}) seconds.')
# The result is an array of overlay header text contents.
_OVERLAY_QUERY_JS_CODE = """
@@ -791,9 +792,9 @@
execute_interval = time.time() - start_time
if execute_interval > timeout:
- raise RuntimeError(
- 'WaitUntilReachState timed out after (%f) seconds, ads_state: (%d).' %
- (execute_interval, ads_state))
+ raise RuntimeError('WaitUntilReachState timed out after '
+ f'({execute_interval}) seconds, '
+ f'ads_state: ({ads_state}).')
def WaitUntilMediaTimeReached(
self,
@@ -822,8 +823,7 @@
adjusted_timeout)
def IsMediaTypeSupported(self, mime):
- return self.ExecuteScript('return MediaSource.isTypeSupported("%s");' %
- (mime))
+ return self.ExecuteScript(f'return MediaSource.isTypeSupported("{mime}");')
def PlayOrPause(self):
self.SendKeys(AdditionalKeys.MEDIA_PLAY_PAUSE)
diff --git a/cobalt/media_integration_tests/test_case.py b/cobalt/media_integration_tests/test_case.py
index d756418..bcdc1ae 100644
--- a/cobalt/media_integration_tests/test_case.py
+++ b/cobalt/media_integration_tests/test_case.py
@@ -37,7 +37,7 @@
"""The base class for media integration test cases."""
def __init__(self, *args, **kwargs):
- super(TestCase, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.launcher_params = _launcher_params
self.supported_features = _supported_features
@@ -60,6 +60,6 @@
@staticmethod
def CreateTest(test_class, test_name, test_function, *args):
- test_method = lambda self: test_function(self, *args)
- test_method.__name__ = 'test_%s' % test_name
+ test_method = lambda self: test_function(self, *args) # pylint: disable=unnecessary-lambda-assignment
+ test_method.__name__ = f'test_{test_name}'
setattr(test_class, test_method.__name__, test_method)
diff --git a/cobalt/media_integration_tests/test_util.py b/cobalt/media_integration_tests/test_util.py
index 81b8e2a..e22cd78 100644
--- a/cobalt/media_integration_tests/test_util.py
+++ b/cobalt/media_integration_tests/test_util.py
@@ -46,11 +46,11 @@
@staticmethod
def create_video_mime_string(codec, resolution):
- return '%s; %s' % (codec, MimeStrings.RESOLUTIONS[resolution])
+ return f'{codec}; {MimeStrings.RESOLUTIONS[resolution]}'
@staticmethod
def create_audio_mime_string(codec, channels):
- return '%s; channels=%d' % (codec, channels)
+ return f'{codec}; channels={channels}'
class PlaybackUrls():
diff --git a/cobalt/media_session/media_session.cc b/cobalt/media_session/media_session.cc
index d0d412c..1a21cb8 100644
--- a/cobalt/media_session/media_session.cc
+++ b/cobalt/media_session/media_session.cc
@@ -14,6 +14,7 @@
#include "cobalt/media_session/media_session.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/media_session/media_session_client.h"
namespace cobalt {
@@ -21,7 +22,7 @@
MediaSession::MediaSession()
: playback_state_(kMediaSessionPlaybackStateNone),
- task_runner_(base::MessageLoop::current()->task_runner()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
is_change_task_queued_(false),
last_position_updated_time_(0) {}
@@ -40,7 +41,7 @@
MediaSession::MediaSession(MediaSessionClient* client)
: media_session_client_(client),
playback_state_(kMediaSessionPlaybackStateNone),
- task_runner_(base::MessageLoop::current()->task_runner()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
is_change_task_queued_(false),
last_position_updated_time_(0) {}
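
The recurring substitution in this file (and several below) replaces the deprecated base::MessageLoop::current()->task_runner() accessor with base::ThreadTaskRunnerHandle::Get(). A minimal sketch of the capture pattern, assuming the Chromium //base headers already included in this diff; ExampleClient is a hypothetical name, not part of the Cobalt tree:

```cpp
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"

class ExampleClient {
 public:
  ExampleClient()
      // Capture the task runner of the constructing thread so later
      // callbacks can be posted back to it, without going through
      // base::MessageLoop::current().
      : task_runner_(base::ThreadTaskRunnerHandle::Get()) {}

 private:
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};
```
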
diff --git a/cobalt/media_stream/microphone_audio_source.cc b/cobalt/media_stream/microphone_audio_source.cc
index 74eff37..3ed0be9 100644
--- a/cobalt/media_stream/microphone_audio_source.cc
+++ b/cobalt/media_stream/microphone_audio_source.cc
@@ -17,6 +17,7 @@
#include <memory>
#include <string>
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/media_stream/audio_parameters.h"
#include "cobalt/speech/microphone.h"
#include "cobalt/speech/microphone_fake.h"
@@ -81,8 +82,7 @@
// Furthermore, it is an error to destruct the microphone manager
// without stopping it, so these callbacks are not to be called
// during the destruction of the object.
- : javascript_thread_task_runner_(
- base::MessageLoop::current()->task_runner()),
+ : javascript_thread_task_runner_(base::ThreadTaskRunnerHandle::Get()),
ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)),
successful_open_callback_(successful_open),
completion_callback_(completion),
@@ -106,8 +106,7 @@
}
void MicrophoneAudioSource::OnDataCompletion() {
- if (javascript_thread_task_runner_ !=
- base::MessageLoop::current()->task_runner()) {
+ if (javascript_thread_task_runner_ != base::ThreadTaskRunnerHandle::Get()) {
javascript_thread_task_runner_->PostTask(
FROM_HERE, base::Bind(&MicrophoneAudioSource::OnDataCompletion,
weak_ptr_factory_.GetWeakPtr()));
@@ -123,8 +122,7 @@
}
void MicrophoneAudioSource::OnMicrophoneOpen() {
- if (javascript_thread_task_runner_ !=
- base::MessageLoop::current()->task_runner()) {
+ if (javascript_thread_task_runner_ != base::ThreadTaskRunnerHandle::Get()) {
javascript_thread_task_runner_->PostTask(
FROM_HERE, base::Bind(&MicrophoneAudioSource::OnMicrophoneOpen,
weak_ptr_factory_.GetWeakPtr()));
@@ -139,8 +137,7 @@
void MicrophoneAudioSource::OnMicrophoneError(
speech::MicrophoneManager::MicrophoneError error,
std::string error_message) {
- if (javascript_thread_task_runner_ !=
- base::MessageLoop::current()->task_runner()) {
+ if (javascript_thread_task_runner_ != base::ThreadTaskRunnerHandle::Get()) {
javascript_thread_task_runner_->PostTask(
FROM_HERE,
base::Bind(&MicrophoneAudioSource::OnMicrophoneError,
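
Each MicrophoneAudioSource callback above uses the same compare-and-repost pattern: if the current thread's task runner is not the captured JavaScript-thread runner, the method re-posts itself and returns. A hedged sketch of that trampoline, continuing the hypothetical ExampleClient above and assuming it also declares OnEvent() and a base::WeakPtrFactory<ExampleClient> weak_ptr_factory_ member:

```cpp
#include "base/bind.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread_task_runner_handle.h"

void ExampleClient::OnEvent() {
  // Bounce to the owning thread if called from anywhere else; the weak
  // pointer silently drops the task if the object is destroyed first.
  if (task_runner_ != base::ThreadTaskRunnerHandle::Get()) {
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&ExampleClient::OnEvent, weak_ptr_factory_.GetWeakPtr()));
    return;
  }
  // From here on, execution is guaranteed to be on the owning thread.
}
```
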
diff --git a/cobalt/network/BUILD.gn b/cobalt/network/BUILD.gn
index bbfa1cb..adf0975 100644
--- a/cobalt/network/BUILD.gn
+++ b/cobalt/network/BUILD.gn
@@ -58,6 +58,10 @@
"//url",
]
+ if (!is_gold) {
+ deps += [ "//cobalt/debug:console_command_manager" ]
+ }
+
if (enable_in_app_dial) {
deps += [
# DialService depends on http server.
diff --git a/cobalt/network/net_poster.cc b/cobalt/network/net_poster.cc
index c40b95e..c2d1a1b 100644
--- a/cobalt/network/net_poster.cc
+++ b/cobalt/network/net_poster.cc
@@ -16,7 +16,10 @@
#include <algorithm>
#include <memory>
+#include <utility>
+#include <vector>
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/network/network_module.h"
#include "net/base/net_errors.h"
@@ -30,8 +33,7 @@
void NetPoster::Send(const GURL& url, const std::string& content_type,
const std::string& data) {
- if (network_module_->task_runner() !=
- base::MessageLoop::current()->task_runner()) {
+ if (network_module_->task_runner() != base::ThreadTaskRunnerHandle::Get()) {
network_module_->task_runner()->PostTask(
FROM_HERE, base::Bind(&NetPoster::Send, base::Unretained(this), url,
content_type, data));
@@ -47,6 +49,7 @@
url_fetcher->SetStopOnRedirect(true);
url_fetcher->SetRequestContext(
network_module_->url_request_context_getter().get());
+ network_module_->AddClientHintHeaders(*url_fetcher);
if (data.size()) {
url_fetcher->SetUploadData(content_type, data);
@@ -58,7 +61,7 @@
void NetPoster::OnURLFetchComplete(const net::URLFetcher* source) {
// Make sure the thread that created the fetcher is the same one that deletes
// it. Otherwise we have unsafe access to the fetchers_ list.
- DCHECK_EQ(base::MessageLoop::current()->task_runner(),
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(),
network_module_->task_runner());
net::URLRequestStatus status = source->GetStatus();
if (!status.is_success()) {
diff --git a/cobalt/network/network_delegate.cc b/cobalt/network/network_delegate.cc
index a4c1158..6e7234e 100644
--- a/cobalt/network/network_delegate.cc
+++ b/cobalt/network/network_delegate.cc
@@ -26,8 +26,10 @@
namespace network {
NetworkDelegate::NetworkDelegate(net::StaticCookiePolicy::Type cookie_policy,
- network::HTTPSRequirement https_requirement)
+ network::HTTPSRequirement https_requirement,
+ network::CORSPolicy cors_policy)
: cookie_policy_(cookie_policy),
+ cors_policy_(cors_policy),
cookies_enabled_(true),
https_requirement_(https_requirement) {}
diff --git a/cobalt/network/network_delegate.h b/cobalt/network/network_delegate.h
index f44478b..8fbeb8f 100644
--- a/cobalt/network/network_delegate.h
+++ b/cobalt/network/network_delegate.h
@@ -15,6 +15,7 @@
#ifndef COBALT_NETWORK_NETWORK_DELEGATE_H_
#define COBALT_NETWORK_NETWORK_DELEGATE_H_
+#include <set>
#include <string>
#include "net/base/static_cookie_policy.h"
@@ -30,6 +31,11 @@
kHTTPSOptional,
};
+enum CORSPolicy {
+ kCORSRequired,
+ kCORSOptional,
+};
+
// A NetworkDelegate receives callbacks when network events occur.
// Each override can specify custom behavior or just add additional logging.
// We do nothing for most events, but our network delegate
@@ -37,13 +43,16 @@
class NetworkDelegate : public net::NetworkDelegate {
public:
NetworkDelegate(net::StaticCookiePolicy::Type cookie_policy,
- network::HTTPSRequirement https_requirement);
+ network::HTTPSRequirement https_requirement,
+ network::CORSPolicy cors_policy);
~NetworkDelegate() override;
// For debugging, we allow blocking all cookies.
void set_cookies_enabled(bool enabled) { cookies_enabled_ = enabled; }
bool cookies_enabled() const { return cookies_enabled_; }
+ network::CORSPolicy cors_policy() const { return cors_policy_; }
+
protected:
// net::NetworkDelegate implementation.
int OnBeforeURLRequest(net::URLRequest* request,
@@ -88,10 +97,10 @@
bool OnCanAccessFile(const net::URLRequest& request,
const base::FilePath& original_path,
const base::FilePath& absolute_path) const override;
- virtual bool OnCanEnablePrivacyMode(
- const GURL& url, const GURL& site_for_cookies) const override;
- virtual bool OnAreExperimentalCookieFeaturesEnabled() const override;
- virtual bool OnCancelURLRequestWithPolicyViolatingReferrerHeader(
+ bool OnCanEnablePrivacyMode(const GURL& url,
+ const GURL& site_for_cookies) const override;
+ bool OnAreExperimentalCookieFeaturesEnabled() const override;
+ bool OnCancelURLRequestWithPolicyViolatingReferrerHeader(
const net::URLRequest& request, const GURL& target_url,
const GURL& referrer_url) const override;
@@ -100,24 +109,23 @@
// Reporting is a central mechanism for sending out-of-band error reports
// to origins from various other components (e.g. HTTP Public Key Pinning,
// Interventions, or Content Security Policy could potentially use it).
- virtual bool OnCanQueueReportingReport(
- const url::Origin& origin) const override;
+ bool OnCanQueueReportingReport(const url::Origin& origin) const override;
- virtual void OnCanSendReportingReports(
- std::set<url::Origin> origins,
- base::OnceCallback<void(std::set<url::Origin>)> result_callback)
- const override;
+ void OnCanSendReportingReports(std::set<url::Origin> origins,
+ base::OnceCallback<void(std::set<url::Origin>)>
+ result_callback) const override;
- virtual bool OnCanSetReportingClient(const url::Origin& origin,
- const GURL& endpoint) const override;
+ bool OnCanSetReportingClient(const url::Origin& origin,
+ const GURL& endpoint) const override;
- virtual bool OnCanUseReportingClient(const url::Origin& origin,
- const GURL& endpoint) const override;
+ bool OnCanUseReportingClient(const url::Origin& origin,
+ const GURL& endpoint) const override;
net::StaticCookiePolicy::Type ComputeCookiePolicy() const;
private:
net::StaticCookiePolicy::Type cookie_policy_;
+ network::CORSPolicy cors_policy_;
bool cookies_enabled_;
network::HTTPSRequirement https_requirement_;
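
The new CORSPolicy enum travels alongside the existing cookie and HTTPS options, so every construction site gains a third argument (network_module.cc below passes options_.cors_policy). A minimal usage sketch; the argument values are illustrative, not taken from this diff:

```cpp
#include <memory>

#include "cobalt/network/network_delegate.h"
#include "net/base/static_cookie_policy.h"

std::unique_ptr<cobalt::network::NetworkDelegate> MakeDelegate() {
  // The third argument is new in this diff; kCORSRequired matches the
  // default declared in NetworkModule::Options.
  return std::make_unique<cobalt::network::NetworkDelegate>(
      net::StaticCookiePolicy::BLOCK_ALL_THIRD_PARTY_COOKIES,
      cobalt::network::kHTTPSRequired, cobalt::network::kCORSRequired);
}
```
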
diff --git a/cobalt/network/network_module.cc b/cobalt/network/network_module.cc
index f192961..b358c11 100644
--- a/cobalt/network/network_module.cc
+++ b/cobalt/network/network_module.cc
@@ -41,11 +41,14 @@
Initialize("Null user agent string.", NULL);
}
-NetworkModule::NetworkModule(const std::string& user_agent_string,
- storage::StorageManager* storage_manager,
- base::EventDispatcher* event_dispatcher,
- const Options& options)
- : storage_manager_(storage_manager), options_(options) {
+NetworkModule::NetworkModule(
+ const std::string& user_agent_string,
+ const std::vector<std::string>& client_hint_headers,
+ storage::StorageManager* storage_manager,
+ base::EventDispatcher* event_dispatcher, const Options& options)
+ : client_hint_headers_(client_hint_headers),
+ storage_manager_(storage_manager),
+ options_(options) {
Initialize(user_agent_string, event_dispatcher);
}
@@ -170,7 +173,7 @@
}
void NetworkModule::OnCreate(base::WaitableEvent* creation_event) {
- DCHECK(task_runner()->BelongsToCurrentThread());
+ DCHECK(task_runner()->RunsTasksInCurrentSequence());
net::NetLog* net_log = NULL;
#if defined(ENABLE_NETWORK_LOGGING)
@@ -180,8 +183,9 @@
new URLRequestContext(storage_manager_, options_.custom_proxy, net_log,
options_.ignore_certificate_errors, task_runner(),
options_.persistent_settings));
- network_delegate_.reset(
- new NetworkDelegate(options_.cookie_policy, options_.https_requirement));
+ network_delegate_.reset(new NetworkDelegate(options_.cookie_policy,
+ options_.https_requirement,
+ options_.cors_policy));
url_request_context_->set_http_user_agent_settings(
http_user_agent_settings_.get());
url_request_context_->set_network_delegate(network_delegate_.get());
@@ -197,5 +201,16 @@
creation_event->Signal();
}
+void NetworkModule::AddClientHintHeaders(net::URLFetcher& url_fetcher) const {
+ // Check if persistent setting is enabled before adding the headers.
+ if (options_.persistent_settings != nullptr &&
+ options_.persistent_settings->GetPersistentSettingAsBool(
+ kClientHintHeadersEnabledPersistentSettingsKey, false)) {
+ for (const auto& header : client_hint_headers_) {
+ url_fetcher.AddExtraRequestHeader(header);
+ }
+ }
+}
+
} // namespace network
} // namespace cobalt
diff --git a/cobalt/network/network_module.h b/cobalt/network/network_module.h
index ee04620..1162cac 100644
--- a/cobalt/network/network_module.h
+++ b/cobalt/network/network_module.h
@@ -17,9 +17,10 @@
#include <memory>
#include <string>
+#include <vector>
#include "base/message_loop/message_loop.h"
-#include "base/single_thread_task_runner.h"
+#include "base/sequenced_task_runner.h"
#include "base/threading/thread.h"
#include "cobalt/base/event_dispatcher.h"
#include "cobalt/network/cobalt_net_log.h"
@@ -50,6 +51,9 @@
namespace network {
+const char kClientHintHeadersEnabledPersistentSettingsKey[] =
+ "clientHintHeadersEnabled";
+
class NetworkSystem;
// NetworkModule wraps various networking-related components such as
// a URL request context. This is owned by BrowserModule.
@@ -60,12 +64,14 @@
: cookie_policy(net::StaticCookiePolicy::BLOCK_ALL_THIRD_PARTY_COOKIES),
ignore_certificate_errors(false),
https_requirement(network::kHTTPSRequired),
+ cors_policy(network::kCORSRequired),
preferred_language("en-US"),
max_network_delay(0),
persistent_settings(nullptr) {}
net::StaticCookiePolicy::Type cookie_policy;
bool ignore_certificate_errors;
HTTPSRequirement https_requirement;
+ network::CORSPolicy cors_policy;
std::string preferred_language;
std::string custom_proxy;
SbTime max_network_delay;
@@ -77,6 +83,7 @@
// Constructor for production use.
NetworkModule(const std::string& user_agent_string,
+ const std::vector<std::string>& client_hint_headers,
storage::StorageManager* storage_manager,
base::EventDispatcher* event_dispatcher,
const Options& options = Options());
@@ -94,7 +101,7 @@
scoped_refptr<URLRequestContextGetter> url_request_context_getter() const {
return url_request_context_getter_;
}
- scoped_refptr<base::SingleThreadTaskRunner> task_runner() const {
+ scoped_refptr<base::SequencedTaskRunner> task_runner() const {
return thread_->task_runner();
}
storage::StorageManager* storage_manager() const { return storage_manager_; }
@@ -109,12 +116,17 @@
void SetEnableQuic(bool enable_quic);
+ // Adds the Client Hint Headers to the provided URLFetcher.
+ // Only added when kClientHintHeadersEnabledPersistentSettingsKey is true.
+ void AddClientHintHeaders(net::URLFetcher& url_fetcher) const;
+
private:
void Initialize(const std::string& user_agent_string,
base::EventDispatcher* event_dispatcher);
void OnCreate(base::WaitableEvent* creation_event);
std::unique_ptr<network_bridge::NetPoster> CreateNetPoster();
+ std::vector<std::string> client_hint_headers_;
storage::StorageManager* storage_manager_;
std::unique_ptr<base::Thread> thread_;
std::unique_ptr<URLRequestContext> url_request_context_;
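
Putting the header changes together: the production constructor now takes the client hint header lines ahead of the storage manager, and Options grows a cors_policy field. A hedged construction sketch; the header string and surrounding helper are illustrative, not values from this diff:

```cpp
#include <memory>
#include <string>
#include <vector>

#include "cobalt/network/network_module.h"

std::unique_ptr<cobalt::network::NetworkModule> MakeNetworkModule(
    const std::string& user_agent_string,
    cobalt::storage::StorageManager* storage_manager,
    base::EventDispatcher* event_dispatcher) {
  // The headers are handed verbatim to URLFetcher::AddExtraRequestHeader(),
  // so each entry is a pre-formatted "Name: value" line (hypothetical value).
  std::vector<std::string> client_hint_headers = {
      "Sec-CH-UA-Platform: \"ExampleTV\"",
  };
  cobalt::network::NetworkModule::Options options;
  options.cors_policy = cobalt::network::kCORSRequired;  // default per Options
  return std::make_unique<cobalt::network::NetworkModule>(
      user_agent_string, client_hint_headers, storage_manager,
      event_dispatcher, options);
}
```
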
diff --git a/cobalt/network/persistent_cookie_store.cc b/cobalt/network/persistent_cookie_store.cc
index c607ddf..e0772eb 100644
--- a/cobalt/network/persistent_cookie_store.cc
+++ b/cobalt/network/persistent_cookie_store.cc
@@ -15,6 +15,7 @@
#include "cobalt/network/persistent_cookie_store.h"
#include <memory>
+#include <utility>
#include <vector>
#include "base/bind.h"
@@ -30,7 +31,7 @@
void CookieStorageInit(
const PersistentCookieStore::LoadedCallback& loaded_callback,
- scoped_refptr<base::SingleThreadTaskRunner> loaded_callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> loaded_callback_task_runner,
const storage::MemoryStore& memory_store) {
TRACE_EVENT0("cobalt::network", "PersistentCookieStore::CookieStorageInit()");
@@ -79,7 +80,7 @@
void SendEmptyCookieList(
const PersistentCookieStore::LoadedCallback& loaded_callback,
- scoped_refptr<base::SingleThreadTaskRunner> loaded_callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> loaded_callback_task_runner,
const storage::MemoryStore& memory_store) {
loaded_callback_task_runner->PostTask(
FROM_HERE,
@@ -96,7 +97,7 @@
PersistentCookieStore::PersistentCookieStore(
storage::StorageManager* storage,
- scoped_refptr<base::SingleThreadTaskRunner> network_task_runner)
+ scoped_refptr<base::SequencedTaskRunner> network_task_runner)
: storage_(storage), loaded_callback_task_runner_(network_task_runner) {}
PersistentCookieStore::~PersistentCookieStore() {}
diff --git a/cobalt/network/persistent_cookie_store.h b/cobalt/network/persistent_cookie_store.h
index bbc428c..065b082 100644
--- a/cobalt/network/persistent_cookie_store.h
+++ b/cobalt/network/persistent_cookie_store.h
@@ -18,7 +18,7 @@
#include <string>
#include <vector>
-#include "base/single_thread_task_runner.h"
+#include "base/sequenced_task_runner.h"
#include "cobalt/storage/storage_manager.h"
#include "net/cookies/cookie_monster.h"
@@ -29,7 +29,7 @@
public:
explicit PersistentCookieStore(
storage::StorageManager* storage,
- scoped_refptr<base::SingleThreadTaskRunner> network_task_runner);
+ scoped_refptr<base::SequencedTaskRunner> network_task_runner);
~PersistentCookieStore() override;
// net::CookieMonster::PersistentCookieStore methods
@@ -52,7 +52,7 @@
storage::StorageManager* storage_;
// This is required because for example cookie store callbacks can only be
// executed on the network thread.
- scoped_refptr<base::SingleThreadTaskRunner> loaded_callback_task_runner_;
+ scoped_refptr<base::SequencedTaskRunner> loaded_callback_task_runner_;
DISALLOW_COPY_AND_ASSIGN(PersistentCookieStore);
};
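
Loosening the loaded-callback runner from SingleThreadTaskRunner to SequencedTaskRunner works because the cookie callbacks only need to run in sequence with other network-thread work, not on one particular thread, and every SingleThreadTaskRunner is also a SequencedTaskRunner. A small posting sketch with hypothetical names, assuming Chromium //base:

```cpp
#include <utility>

#include "base/callback.h"
#include "base/location.h"
#include "base/sequenced_task_runner.h"

// Hypothetical helper: any SequencedTaskRunner (including the network
// thread's SingleThreadTaskRunner) can accept the posted callback.
void RunWhenLoaded(scoped_refptr<base::SequencedTaskRunner> task_runner,
                   base::OnceClosure loaded_callback) {
  task_runner->PostTask(FROM_HERE, std::move(loaded_callback));
}
```
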
diff --git a/cobalt/network/persistent_cookie_store_test.cc b/cobalt/network/persistent_cookie_store_test.cc
index a05cde9..b01c257 100644
--- a/cobalt/network/persistent_cookie_store_test.cc
+++ b/cobalt/network/persistent_cookie_store_test.cc
@@ -23,6 +23,7 @@
#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
#include "base/strings/stringprintf.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "cobalt/base/cobalt_paths.h"
#include "cobalt/storage/savegame.h"
@@ -112,7 +113,7 @@
storage_manager_.reset(new storage::StorageManager(options));
cookie_store_ = new PersistentCookieStore(
- storage_manager_.get(), base::MessageLoop::current()->task_runner());
+ storage_manager_.get(), base::ThreadTaskRunnerHandle::Get());
}
~PersistentCookieStoreTest() {
diff --git a/cobalt/network/url_request_context.cc b/cobalt/network/url_request_context.cc
index eb46dcf..7467b98 100644
--- a/cobalt/network/url_request_context.cc
+++ b/cobalt/network/url_request_context.cc
@@ -70,7 +70,7 @@
URLRequestContext::URLRequestContext(
storage::StorageManager* storage_manager, const std::string& custom_proxy,
net::NetLog* net_log, bool ignore_certificate_errors,
- scoped_refptr<base::SingleThreadTaskRunner> network_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> network_task_runner,
persistent_storage::PersistentSettings* persistent_settings)
: ALLOW_THIS_IN_INITIALIZER_LIST(storage_(this))
#if defined(ENABLE_DEBUGGER)
@@ -230,7 +230,7 @@
}
void URLRequestContext::SetEnableQuic(bool enable_quic) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
storage_.http_network_session()->SetEnableQuic(enable_quic);
}
diff --git a/cobalt/network/url_request_context.h b/cobalt/network/url_request_context.h
index 8f673c3..6e80479 100644
--- a/cobalt/network/url_request_context.h
+++ b/cobalt/network/url_request_context.h
@@ -19,7 +19,7 @@
#include "base/basictypes.h"
#include "base/macros.h"
-#include "base/threading/thread_checker.h"
+#include "base/sequence_checker.h"
#include "cobalt/persistent_storage/persistent_settings.h"
#include "net/cookies/cookie_monster.h"
#include "net/log/net_log.h"
@@ -43,7 +43,7 @@
URLRequestContext(
storage::StorageManager* storage_manager, const std::string& custom_proxy,
net::NetLog* net_log, bool ignore_certificate_errors,
- scoped_refptr<base::SingleThreadTaskRunner> network_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> network_task_runner,
persistent_storage::PersistentSettings* persistent_settings);
~URLRequestContext() override;
@@ -54,7 +54,7 @@
bool using_http_cache();
private:
- THREAD_CHECKER(thread_checker_);
+ SEQUENCE_CHECKER(sequence_checker_);
net::URLRequestContextStorage storage_;
scoped_refptr<net::CookieMonster::PersistentCookieStore>
persistent_cookie_store_;
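
url_request_context.h/.cc swap THREAD_CHECKER for SEQUENCE_CHECKER to match the SequencedTaskRunner change above. A minimal sketch of the macro pair, assuming Chromium //base; the class name is illustrative:

```cpp
#include "base/sequence_checker.h"

class ExampleSequenceAffine {
 public:
  void SetEnableFeature(bool enabled) {
    // Replaces DCHECK_CALLED_ON_VALID_THREAD(thread_checker_): asserts the
    // call happens on the owning sequence rather than one specific thread.
    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
    enabled_ = enabled;
  }

 private:
  SEQUENCE_CHECKER(sequence_checker_);
  bool enabled_ = false;
};
```
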
diff --git a/cobalt/network/url_request_context_getter.cc b/cobalt/network/url_request_context_getter.cc
index 8db25cc..9bc3791 100644
--- a/cobalt/network/url_request_context_getter.cc
+++ b/cobalt/network/url_request_context_getter.cc
@@ -32,7 +32,7 @@
return url_request_context_;
}
-scoped_refptr<base::SingleThreadTaskRunner>
+scoped_refptr<base::SequencedTaskRunner>
URLRequestContextGetter::GetNetworkTaskRunner() const {
return network_task_runner_;
}
diff --git a/cobalt/network/url_request_context_getter.h b/cobalt/network/url_request_context_getter.h
index 981db33..0695da5 100644
--- a/cobalt/network/url_request_context_getter.h
+++ b/cobalt/network/url_request_context_getter.h
@@ -30,7 +30,7 @@
// Implementation for net::UrlRequestContextGetter.
net::URLRequestContext* GetURLRequestContext() override;
- scoped_refptr<base::SingleThreadTaskRunner> GetNetworkTaskRunner()
+ scoped_refptr<base::SequencedTaskRunner> GetNetworkTaskRunner()
const override;
protected:
@@ -38,7 +38,7 @@
private:
URLRequestContext* url_request_context_;
- scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
+ scoped_refptr<base::SequencedTaskRunner> network_task_runner_;
DISALLOW_COPY_AND_ASSIGN(URLRequestContextGetter);
};
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color.glsl
deleted file mode 100644
index f611e34..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-varying vec4 v_color;
-
-void main() {
- gl_FragColor = v_color;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_blur.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_blur.glsl
deleted file mode 100644
index e7d1c62..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_blur.glsl
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-
-uniform vec4 u_color;
-uniform vec4 u_blur_rect;
-uniform vec2 u_scale_add;
-
-varying vec2 v_offset;
-
-// Calculate the normalized gaussian integral from (pos.x * k, pos.y * k)
-// where k = sqrt(2) * sigma and pos.x <= pos.y. This is just a 1D filter --
-// the pos.x and pos.y values are expected to be on the same axis.
-float GaussianIntegral(vec2 pos) {
- // Approximation of the error function.
- // For x >= 0,
- // erf(x) = 1 - 1 / (1 + k1 * x + k2 * x^2 + k3 * x^3 + k4 * x^4)^4
- // where k1 = 0.278393, k2 = 0.230389, k3 = 0.000972, k4 = 0.078108.
- // For y < 0,
- // erf(y) = -erf(-y).
- vec2 s = sign(pos);
- vec2 a = abs(pos);
- vec2 t = 1.0 +
- (0.278393 + (0.230389 + (0.000972 + 0.078108 * a) * a) * a) * a;
- vec2 t2 = t * t;
- vec2 erf = s - s / (t2 * t2);
-
- // erf(x) = the integral of the normalized gaussian from [-x * k, x * k],
- // where k = sqrt(2) * sigma. Find the integral from (pos.x * k, pos.y * k).
- return dot(erf, vec2(-0.5, 0.5));
-}
-
-void main() {
- // Get the integral over the interval occupied by the rectangle. Both
- // v_offset and u_blur_rect are already scaled for the integral function.
- float integral = GaussianIntegral(u_blur_rect.xz - v_offset.xx) *
- GaussianIntegral(u_blur_rect.yw - v_offset.yy);
- float blur_scale = integral * u_scale_add.x + u_scale_add.y;
- gl_FragColor = u_color * blur_scale;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_blur_rrects.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_blur_rrects.glsl
deleted file mode 100644
index 77d9ab0..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_blur_rrects.glsl
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-
-// The blur rounded rect is split into top and bottom halves.
-// The "start" values represent (left_start.xy, right_start.xy).
-// The "scale" values represent (left_radius.x, 1 / left_radius.y,
-// right_radius.x, 1 / right_radius.y). The sign of the scale value helps
-// to translate between position and corner offset values, where the corner
-// offset is positive if the position is inside the rounded corner.
-uniform vec4 u_blur_start_top;
-uniform vec4 u_blur_start_bottom;
-uniform vec4 u_blur_scale_top;
-uniform vec4 u_blur_scale_bottom;
-
-// The blur extent specifies (blur_size, min_rect_y, max_rect_y, center_rect_y).
-uniform vec4 u_blur_extent;
-
-// The scale_add uniform is used to switch the shader between generating
-// outset shadows and inset shadows. It impacts the shadow gradient and
-// scissor behavior. Use (1, 0) to get an outset shadow with the provided
-// scissor rect behaving as an exclusive scissor, and (-1, 1) to get an
-// inset shadow with scissor rect behaving as an inclusive scissor.
-uniform vec2 u_scale_add;
-
-uniform vec4 u_color;
-
-varying vec2 v_offset;
-varying vec4 v_rcorner;
-
-// Return 0 if the given position is inside the rounded corner, or scale
-// towards 1 as it goes outside a 1-pixel anti-aliasing border.
-// |rcorner| is a vec4 representing (scaled.xy, 1 / radius.xy) with scaled.xy
-// representing the offset of the current position in terms of radius.xy
-// (i.e. offset.xy / radius.xy). The scaled.xy values can be negative if the
-// current position is outside the corner start.
-float IsOutsideRCorner(vec4 rcorner) {
- // Estimate the distance to an implicit function using
- // dist = f(x,y) / length(gradient(f(x,y)))
- // For an ellipse, f(x,y) = x^2 / a^2 + y^2 / b^2 - 1.
- highp vec2 scaled = max(rcorner.xy, 0.0);
- highp float implicit = dot(scaled, scaled) - 1.0;
-
- // NOTE: To accommodate large radius values using mediump floats, rcorner.zw
- // was scaled by kRCornerGradientScale in the vertex attribute data.
- // Multiply inv_gradient by kRCornerGradientScale to undo that scaling.
- const highp float kRCornerGradientScale = 16.0;
- highp vec2 gradient = 2.0 * scaled * rcorner.zw;
- highp float inv_gradient = kRCornerGradientScale *
- inversesqrt(max(dot(gradient, gradient), 0.0001));
-
- return clamp(0.5 + implicit * inv_gradient, 0.0, 1.0);
-}
-
-// Calculate the normalized gaussian integral from (pos.x * k, pos.y * k)
-// where k = sqrt(2) * sigma and pos.x <= pos.y. This is just a 1D filter --
-// the pos.x and pos.y values are expected to be on the same axis.
-float GaussianIntegral(vec2 pos) {
- // Approximation of the error function.
- // For x >= 0,
- // erf(x) = 1 - 1 / (1 + k1 * x + k2 * x^2 + k3 * x^3 + k4 * x^4)^4
- // where k1 = 0.278393, k2 = 0.230389, k3 = 0.000972, k4 = 0.078108.
- // For y < 0,
- // erf(y) = -erf(-y).
- vec2 s = sign(pos);
- vec2 a = abs(pos);
- vec2 t = 1.0 +
- (0.278393 + (0.230389 + (0.000972 + 0.078108 * a) * a) * a) * a;
- vec2 t2 = t * t;
- vec2 erf = s - s / (t2 * t2);
-
- // erf(x) = the integral of the normalized gaussian from [-x * k, x * k],
- // where k = sqrt(2) * sigma. Find the integral from (pos.x * k, pos.y * k).
- return dot(erf, vec2(-0.5, 0.5));
-}
-
-float GetXBlur(float x, float y) {
- // Solve for X of the rounded corners at the given Y based on the equation
- // for an ellipse: x^2 / a^2 + y^2 / b^2 = 1.
- vec4 corner_start =
- (y < u_blur_extent.w) ? u_blur_start_top : u_blur_start_bottom;
- vec4 corner_scale =
- (y < u_blur_extent.w) ? u_blur_scale_top : u_blur_scale_bottom;
- vec2 scaled = clamp((y - corner_start.yw) * corner_scale.yw, 0.0, 1.0);
- vec2 root = sqrt(1.0 - scaled * scaled);
- vec2 extent_x = corner_start.xz + corner_scale.xz * root;
-
- // Get the integral over the interval occupied by the rectangle.
- return GaussianIntegral(extent_x - x);
-}
-
-float GetBlur(vec2 pos) {
- // Approximate the 2D gaussian filter using numerical integration. Sample
- // points between the y extents of the rectangle.
- float low = clamp(pos.y - u_blur_extent.x, u_blur_extent.y, u_blur_extent.z);
- float high = clamp(pos.y + u_blur_extent.x, u_blur_extent.y, u_blur_extent.z);
-
- // Use the Gauss–Legendre quadrature with 6 points to numerically integrate.
- // Using fewer samples will show artifacts with elliptical corners that are
- // likely to be used.
- const vec3 kStepScale1 = vec3(-0.932470, -0.661209, -0.238619);
- const vec3 kStepScale2 = vec3( 0.932470, 0.661209, 0.238619);
- const vec3 kWeight = vec3(0.171324, 0.360762, 0.467914);
-
- float half_size = (high - low) * 0.5;
- float middle = (high + low) * 0.5;
- vec3 weight = half_size * kWeight;
- vec3 pos1 = middle + half_size * kStepScale1;
- vec3 pos2 = middle + half_size * kStepScale2;
- vec3 offset1 = pos1 - pos.yyy;
- vec3 offset2 = pos2 - pos.yyy;
-
- // The integral along the x-axis is computed. The integral along the y-axis
- // is roughly approximated. To get the 2D filter, multiply the two integrals.
- // Visual artifacts appear when the computed integrals along the x-axis
- // change rapidly between samples (e.g. elliptical corners that are much
- // wider than they are tall).
- vec3 xblur1 = vec3(GetXBlur(pos.x, pos1.x),
- GetXBlur(pos.x, pos1.y),
- GetXBlur(pos.x, pos1.z));
- vec3 xblur2 = vec3(GetXBlur(pos.x, pos2.x),
- GetXBlur(pos.x, pos2.y),
- GetXBlur(pos.x, pos2.z));
- vec3 yblur1 = exp(-offset1 * offset1) * weight;
- vec3 yblur2 = exp(-offset2 * offset2) * weight;
-
- // Since each yblur value should be normalized by kNormalizeGaussian, just
- // scale the sum by it.
- const float kNormalizeGaussian = 0.564189584; // 1 / sqrt(pi)
- return (dot(xblur1, yblur1) + dot(xblur2, yblur2)) * kNormalizeGaussian;
-}
-
-void main() {
- float scissor_scale =
- IsOutsideRCorner(v_rcorner) * u_scale_add.x + u_scale_add.y;
- float blur_scale = GetBlur(v_offset) * u_scale_add.x + u_scale_add.y;
- gl_FragColor = u_color * (blur_scale * scissor_scale);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_include.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_include.glsl
deleted file mode 100644
index ddeb45d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_include.glsl
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision highp float;
-uniform vec4 u_include; // include scissor (x_min, y_min, x_max, y_max)
-varying vec2 v_offset;
-varying vec4 v_color;
-
-void main() {
- vec2 include = step(u_include.xy, v_offset) * step(v_offset, u_include.zw);
- gl_FragColor = v_color * include.x * include.y;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_texcoord.glsl
deleted file mode 100644
index 5ff73f6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_texcoord.glsl
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-varying vec4 v_color;
-varying vec2 v_texcoord;
-
-uniform vec4 u_texcoord_clamp_rgba;
-uniform sampler2D u_texture_rgba;
-
-#pragma array u_texcoord_clamp(u_texcoord_clamp_rgba);
-#pragma array u_texture(u_texture_rgba);
-
-vec4 GetRgba() {
- return texture2D(u_texture_rgba,
- clamp(v_texcoord, u_texcoord_clamp_rgba.xy, u_texcoord_clamp_rgba.zw));
-}
-
-void main() {
- gl_FragColor = v_color * GetRgba();
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_texcoord_yuv3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_texcoord_yuv3.glsl
deleted file mode 100644
index ff766ea..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_color_texcoord_yuv3.glsl
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-varying vec4 v_color;
-varying vec2 v_texcoord;
-
-uniform vec4 u_texcoord_clamp_y;
-uniform vec4 u_texcoord_clamp_u;
-uniform vec4 u_texcoord_clamp_v;
-uniform sampler2D u_texture_y;
-uniform sampler2D u_texture_u;
-uniform sampler2D u_texture_v;
-uniform mat4 u_color_transform_matrix;
-
-#pragma array u_texcoord_clamp(u_texcoord_clamp_y, u_texcoord_clamp_u, u_texcoord_clamp_v);
-#pragma array u_texture(u_texture_y, u_texture_u, u_texture_v);
-
-vec4 GetRgba() {
- float y = texture2D(u_texture_y,
- clamp(v_texcoord, u_texcoord_clamp_y.xy, u_texcoord_clamp_y.zw)).a;
- float u = texture2D(u_texture_u,
- clamp(v_texcoord, u_texcoord_clamp_u.xy, u_texcoord_clamp_u.zw)).a;
- float v = texture2D(u_texture_v,
- clamp(v_texcoord, u_texcoord_clamp_v.xy, u_texcoord_clamp_v.zw)).a;
- vec4 rgba = u_color_transform_matrix * vec4(y, u, v, 1);
-
- return clamp(rgba, vec4(0, 0, 0, 0), vec4(1, 1, 1, 1));
-}
-
-void main() {
- gl_FragColor = v_color * GetRgba();
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_opacity_texcoord1d.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_opacity_texcoord1d.glsl
deleted file mode 100644
index f41f1ca..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_opacity_texcoord1d.glsl
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-uniform sampler2D u_texture;
-uniform vec4 u_texcoord_transform; // (u-scale, u-add, u-max, v-center)
-uniform float u_opacity;
-varying vec2 v_offset;
-
-void main() {
- vec2 texcoord = vec2(
- min(length(v_offset) * u_texcoord_transform.x + u_texcoord_transform.y,
- u_texcoord_transform.z),
- u_texcoord_transform.w);
- gl_FragColor = texture2D(u_texture, texcoord) * u_opacity;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner2_color.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner2_color.glsl
deleted file mode 100644
index e245d3a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner2_color.glsl
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-precision mediump float;
-
-uniform vec4 u_color;
-varying vec4 v_rcorner_inner;
-varying vec4 v_rcorner_outer;
-
-// Return 0 if the given position is inside the rounded corner, or scale
-// towards 1 as it goes outside a 1-pixel anti-aliasing border.
-// |rcorner| is a vec4 representing (scaled.xy, 1 / radius.xy) with scaled.xy
-// representing the offset of the current position in terms of radius.xy
-// (i.e. offset.xy / radius.xy). The scaled.xy values can be negative if the
-// current position is outside the corner start.
-float IsOutsideRCorner(vec4 rcorner) {
- // Estimate the distance to an implicit function using
- // dist = f(x,y) / length(gradient(f(x,y)))
- // For an ellipse, f(x,y) = x^2 / a^2 + y^2 / b^2 - 1.
- highp vec2 scaled = max(rcorner.xy, 0.0);
- highp float implicit = dot(scaled, scaled) - 1.0;
-
- // NOTE: To accommodate large radius values using mediump floats, rcorner.zw
- // was scaled by kRCornerGradientScale in the vertex attribute data.
- // Multiply inv_gradient by kRCornerGradientScale to undo that scaling.
- const highp float kRCornerGradientScale = 16.0;
- highp vec2 gradient = 2.0 * scaled * rcorner.zw;
- highp float inv_gradient = kRCornerGradientScale *
- inversesqrt(max(dot(gradient, gradient), 0.0001));
-
- return clamp(0.5 + implicit * inv_gradient, 0.0, 1.0);
-}
-
-void main() {
- float inner_scale = IsOutsideRCorner(v_rcorner_inner);
- float outer_scale = 1.0 - IsOutsideRCorner(v_rcorner_outer);
- gl_FragColor = u_color * (inner_scale * outer_scale);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_color.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_color.glsl
deleted file mode 100644
index b818cf9..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_color.glsl
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-
-uniform vec4 u_color;
-varying vec4 v_rcorner;
-
-// Return 0 if the given position is inside the rounded corner, or scale
-// towards 1 as it goes outside a 1-pixel anti-aliasing border.
-// |rcorner| is a vec4 representing (scaled.xy, 1 / radius.xy) with scaled.xy
-// representing the offset of the current position in terms of radius.xy
-// (i.e. offset.xy / radius.xy). The scaled.xy values can be negative if the
-// current position is outside the corner start.
-float IsOutsideRCorner(vec4 rcorner) {
- // Estimate the distance to an implicit function using
- // dist = f(x,y) / length(gradient(f(x,y)))
- // For an ellipse, f(x,y) = x^2 / a^2 + y^2 / b^2 - 1.
- highp vec2 scaled = max(rcorner.xy, 0.0);
- highp float implicit = dot(scaled, scaled) - 1.0;
-
- // NOTE: To accommodate large radius values using mediump floats, rcorner.zw
- // was scaled by kRCornerGradientScale in the vertex attribute data.
- // Multiply inv_gradient by kRCornerGradientScale to undo that scaling.
- const highp float kRCornerGradientScale = 16.0;
- highp vec2 gradient = 2.0 * scaled * rcorner.zw;
- highp float inv_gradient = kRCornerGradientScale *
- inversesqrt(max(dot(gradient, gradient), 0.0001));
-
- return clamp(0.5 + implicit * inv_gradient, 0.0, 1.0);
-}
-
-void main() {
- float scale = IsOutsideRCorner(v_rcorner);
- gl_FragColor = u_color * (1.0 - scale);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_texcoord_color.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_texcoord_color.glsl
deleted file mode 100644
index 5db8675..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_texcoord_color.glsl
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-
-uniform vec4 u_color;
-
-varying vec4 v_rcorner;
-varying vec2 v_texcoord;
-
-// Return 0 if the given position is inside the rounded corner, or scale
-// towards 1 as it goes outside a 1-pixel anti-aliasing border.
-// |rcorner| is a vec4 representing (scaled.xy, 1 / radius.xy) with scaled.xy
-// representing the offset of the current position in terms of radius.xy
-// (i.e. offset.xy / radius.xy). The scaled.xy values can be negative if the
-// current position is outside the corner start.
-float IsOutsideRCorner(vec4 rcorner) {
- // Estimate the distance to an implicit function using
- // dist = f(x,y) / length(gradient(f(x,y)))
- // For an ellipse, f(x,y) = x^2 / a^2 + y^2 / b^2 - 1.
- highp vec2 scaled = max(rcorner.xy, 0.0);
- highp float implicit = dot(scaled, scaled) - 1.0;
-
- // NOTE: To accommodate large radius values using mediump floats, rcorner.zw
- // was scaled by kRCornerGradientScale in the vertex attribute data.
- // Multiply inv_gradient by kRCornerGradientScale to undo that scaling.
- const highp float kRCornerGradientScale = 16.0;
- highp vec2 gradient = 2.0 * scaled * rcorner.zw;
- highp float inv_gradient = kRCornerGradientScale *
- inversesqrt(max(dot(gradient, gradient), 0.0001));
-
- return clamp(0.5 + implicit * inv_gradient, 0.0, 1.0);
-}
-
-uniform vec4 u_texcoord_clamp_rgba;
-uniform sampler2D u_texture_rgba;
-
-#pragma array u_texcoord_clamp(u_texcoord_clamp_rgba);
-#pragma array u_texture(u_texture_rgba);
-
-vec4 GetRgba() {
- return texture2D(u_texture_rgba,
- clamp(v_texcoord, u_texcoord_clamp_rgba.xy, u_texcoord_clamp_rgba.zw));
-}
-
-void main() {
- float scale = IsOutsideRCorner(v_rcorner);
- gl_FragColor = u_color * (1.0 - scale) * GetRgba();
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_texcoord_color_yuv3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_texcoord_color_yuv3.glsl
deleted file mode 100644
index 146664e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_rcorner_texcoord_color_yuv3.glsl
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-
-uniform vec4 u_color;
-
-varying vec4 v_rcorner;
-varying vec2 v_texcoord;
-
-// Return 0 if the given position is inside the rounded corner, or scale
-// towards 1 as it goes outside a 1-pixel anti-aliasing border.
-// |rcorner| is a vec4 representing (scaled.xy, 1 / radius.xy) with scaled.xy
-// representing the offset of the current position in terms of radius.xy
-// (i.e. offset.xy / radius.xy). The scaled.xy values can be negative if the
-// current position is outside the corner start.
-float IsOutsideRCorner(vec4 rcorner) {
- // Estimate the distance to an implicit function using
- // dist = f(x,y) / length(gradient(f(x,y)))
- // For an ellipse, f(x,y) = x^2 / a^2 + y^2 / b^2 - 1.
- highp vec2 scaled = max(rcorner.xy, 0.0);
- highp float implicit = dot(scaled, scaled) - 1.0;
-
- // NOTE: To accommodate large radius values using mediump floats, rcorner.zw
- // was scaled by kRCornerGradientScale in the vertex attribute data.
- // Multiply inv_gradient by kRCornerGradientScale to undo that scaling.
- const highp float kRCornerGradientScale = 16.0;
- highp vec2 gradient = 2.0 * scaled * rcorner.zw;
- highp float inv_gradient = kRCornerGradientScale *
- inversesqrt(max(dot(gradient, gradient), 0.0001));
-
- return clamp(0.5 + implicit * inv_gradient, 0.0, 1.0);
-}
-
-uniform vec4 u_texcoord_clamp_y;
-uniform vec4 u_texcoord_clamp_u;
-uniform vec4 u_texcoord_clamp_v;
-uniform sampler2D u_texture_y;
-uniform sampler2D u_texture_u;
-uniform sampler2D u_texture_v;
-uniform mat4 u_color_transform_matrix;
-
-#pragma array u_texcoord_clamp(u_texcoord_clamp_y, u_texcoord_clamp_u, u_texcoord_clamp_v);
-#pragma array u_texture(u_texture_y, u_texture_u, u_texture_v);
-
-vec4 GetRgba() {
- float y = texture2D(u_texture_y,
- clamp(v_texcoord, u_texcoord_clamp_y.xy, u_texcoord_clamp_y.zw)).a;
- float u = texture2D(u_texture_u,
- clamp(v_texcoord, u_texcoord_clamp_u.xy, u_texcoord_clamp_u.zw)).a;
- float v = texture2D(u_texture_v,
- clamp(v_texcoord, u_texcoord_clamp_v.xy, u_texcoord_clamp_v.zw)).a;
- vec4 rgba = u_color_transform_matrix * vec4(y, u, v, 1);
-
- return clamp(rgba, vec4(0, 0, 0, 0), vec4(1, 1, 1, 1));
-}
-
-void main() {
- float scale = IsOutsideRCorner(v_rcorner);
- gl_FragColor = u_color * (1.0 - scale) * GetRgba();
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_texcoord.glsl
deleted file mode 100644
index 20e2228..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_texcoord.glsl
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-varying vec2 v_texcoord;
-
-uniform sampler2D u_texture_rgba;
-
-#pragma array u_texture(u_texture_rgba);
-
-vec4 GetRgba() {
- return texture2D(u_texture_rgba, v_texcoord);
-}
-
-void main() {
- gl_FragColor = GetRgba();
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_texcoord_yuv3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_texcoord_yuv3.glsl
deleted file mode 100644
index 40bc9fa..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_directgles_texcoord_yuv3.glsl
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-varying vec2 v_texcoord;
-
-uniform sampler2D u_texture_y;
-uniform sampler2D u_texture_u;
-uniform sampler2D u_texture_v;
-uniform mat4 u_color_transform_matrix;
-
-#pragma array u_texture(u_texture_y, u_texture_u, u_texture_v);
-
-vec4 GetRgba() {
- float y = texture2D(u_texture_y, v_texcoord).a;
- float u = texture2D(u_texture_u, v_texcoord).a;
- float v = texture2D(u_texture_v, v_texcoord).a;
- vec4 rgba = u_color_transform_matrix * vec4(y, u, v, 1);
-
- return clamp(rgba, vec4(0, 0, 0, 0), vec4(1, 1, 1, 1));
-}
-
-void main() {
- gl_FragColor = GetRgba();
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_ellipse_with_texture_and_some_alpha.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_ellipse_with_texture_and_some_alpha.glsl
deleted file mode 100644
index c37000f..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_ellipse_with_texture_and_some_alpha.glsl
+++ /dev/null
@@ -1,41 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-uniform vec4 uinnerRect_Stage2;
-uniform vec4 uinvRadiiLTRB_Stage2;
-uniform highp sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vcolor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
-highp vec2 _sktmpCoord = gl_FragCoord.xy;
-highp vec4 sk_FragCoord = vec4(_sktmpCoord.x, u_skRTHeight - _sktmpCoord.y, 1.0, 1.0);
- vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- vec4 output_Stage1;
- {
- vec4 child;
- {
- child = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0).xyzw;
- }
- output_Stage1 = child * outputColor_Stage0.w;
- }
- vec4 output_Stage2;
- {
- vec2 dxy0 = uinnerRect_Stage2.xy - sk_FragCoord.xy;
- vec2 dxy1 = sk_FragCoord.xy - uinnerRect_Stage2.zw;
- vec2 dxy = max(max(dxy0, dxy1), 0.0);
- vec2 Z = max(max(dxy0 * uinvRadiiLTRB_Stage2.xy, dxy1 * uinvRadiiLTRB_Stage2.zw), 0.0);
- float implicit = dot(Z, dxy) - 1.0;
- float grad_dot = 4.0 * dot(Z, Z);
- grad_dot = max(grad_dot, 0.0001);
- float approx_dist = implicit * inversesqrt(grad_dot);
- float alpha = clamp(0.5 - approx_dist, 0.0, 1.0);
- output_Stage2 = vec4(alpha);
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_position_and_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_position_and_texcoord.glsl
deleted file mode 100644
index ca7ad4e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_position_and_texcoord.glsl
+++ /dev/null
@@ -1,7 +0,0 @@
-precision mediump float;
-varying vec2 v_tex_coord;
-uniform sampler2D tex;
-
-void main() {
- gl_FragColor = texture2D(tex, v_tex_coord);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold.glsl
deleted file mode 100644
index 6950864..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold.glsl
+++ /dev/null
@@ -1,72 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- outputColor_Stage0 = vinColor_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold_2.glsl
deleted file mode 100644
index 1be401c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold_2.glsl
+++ /dev/null
@@ -1,75 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- {
- child.xyz *= child.w;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- outputColor_Stage0 = vinColor_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold_3.glsl
deleted file mode 100644
index 02f89e1..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_threshold_3.glsl
+++ /dev/null
@@ -1,75 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- outputColor_Stage0 = vinColor_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- mediump float distanceToInnerEdge = circleEdge.z * (d - circleEdge.w);
- mediump float innerAlpha = clamp(distanceToInnerEdge, 0.0, 1.0);
- edgeAlpha *= innerAlpha;
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_thresholds.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_thresholds.glsl
deleted file mode 100644
index 11db997..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_circle_color_scale_thresholds.glsl
+++ /dev/null
@@ -1,93 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- mediump float distanceToInnerEdge = circleEdge.z * (d - circleEdge.w);
- mediump float innerAlpha = clamp(distanceToInnerEdge, 0.0, 1.0);
- edgeAlpha *= innerAlpha;
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold.glsl
deleted file mode 100644
index e1600ec..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold.glsl
+++ /dev/null
@@ -1,70 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump float vinCoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float alpha = 1.0;
- alpha = vinCoverage_Stage0;
- outputCoverage_Stage0 = vec4(alpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold_2.glsl
deleted file mode 100644
index e7447a2..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold_2.glsl
+++ /dev/null
@@ -1,67 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump float vinCoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float alpha = 1.0;
- alpha = vinCoverage_Stage0;
- outputCoverage_Stage0 = vec4(alpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold_3.glsl
deleted file mode 100644
index 1ee4967..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_threshold_3.glsl
+++ /dev/null
@@ -1,66 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- highp float coverage = vcoverage_Stage0;
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds.glsl
deleted file mode 100644
index 7bf9ef3..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds.glsl
+++ /dev/null
@@ -1,83 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- highp float coverage = vcoverage_Stage0;
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_2.glsl
deleted file mode 100644
index e6a6613..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_2.glsl
+++ /dev/null
@@ -1,69 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- highp float coverage = vcoverage_Stage0;
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_3.glsl
deleted file mode 100644
index a5049c7..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_3.glsl
+++ /dev/null
@@ -1,93 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias8_9_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump float vinCoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.w) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- } else {
- {
- {
- scale = uscale8_9_Stage1_c0_c0_c1_c0;
- bias = ubias8_9_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float alpha = 1.0;
- alpha = vinCoverage_Stage0;
- outputCoverage_Stage0 = vec4(alpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_4.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_4.glsl
deleted file mode 100644
index 7a83670..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_coverage_scale_thresholds_4.glsl
+++ /dev/null
@@ -1,98 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale10_11_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias10_11_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump float vinCoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.w) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- } else {
- {
- if (t < uthresholds9_13_Stage1_c0_c0_c1_c0.x) {
- scale = uscale8_9_Stage1_c0_c0_c1_c0;
- bias = ubias8_9_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale10_11_Stage1_c0_c0_c1_c0;
- bias = ubias10_11_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float alpha = 1.0;
- alpha = vinCoverage_Stage0;
- outputCoverage_Stage0 = vec4(alpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_decal_scaletranslate_texdom_texturew_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_decal_scaletranslate_texdom_texturew_threshold.glsl
deleted file mode 100644
index e338bcf..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_decal_scaletranslate_texdom_texturew_threshold.glsl
+++ /dev/null
@@ -1,89 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uscaleAndTranslate_Stage2;
-uniform mediump vec4 uTexDom_Stage2;
-uniform mediump vec3 uDecalParams_Stage2;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying mediump vec4 vcolor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- {
- child.xyz *= child.w;
- }
- return child;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage2.xy + uscaleAndTranslate_Stage2.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage2.xy, uTexDom_Stage2.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage2, clampedCoord).wwww;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage2.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage2.y);
- if (err > uDecalParams_Stage2.z) {
- err = 1.0;
- } else if (uDecalParams_Stage2.z < 1.0) {
- err = 0.0;
- }
- output_Stage2 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold.glsl
deleted file mode 100644
index 667a564..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold.glsl
+++ /dev/null
@@ -1,76 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold_2.glsl
deleted file mode 100644
index 35a3010..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold_2.glsl
+++ /dev/null
@@ -1,78 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold_3.glsl
deleted file mode 100644
index 1f47fae..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_threshold_3.glsl
+++ /dev/null
@@ -1,79 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_thresholds.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_thresholds.glsl
deleted file mode 100644
index b7223a3..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_thresholds.glsl
+++ /dev/null
@@ -1,88 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_thresholds_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_thresholds_2.glsl
deleted file mode 100644
index ba5cadd..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_quad_scale_thresholds_2.glsl
+++ /dev/null
@@ -1,93 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold.glsl
deleted file mode 100644
index 8bcc27a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold.glsl
+++ /dev/null
@@ -1,73 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold_2.glsl
deleted file mode 100644
index 2ac7fcc..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold_2.glsl
+++ /dev/null
@@ -1,74 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold_3.glsl
deleted file mode 100644
index 1cb8137..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_threshold_3.glsl
+++ /dev/null
@@ -1,71 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_thresholds.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_thresholds.glsl
deleted file mode 100644
index ba8e732..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texindex_texturew_thresholds.glsl
+++ /dev/null
@@ -1,86 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texture_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texture_threshold.glsl
deleted file mode 100644
index 4522b68..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texture_threshold.glsl
+++ /dev/null
@@ -1,89 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage2_c1_c0_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage2_c1_c0_c0_c0;
-uniform mediump float ubias_Stage2_c1_c0_c0_c0_c0_c0;
-uniform mediump float uscale_Stage2_c1_c0_c0_c0_c0_c0;
-uniform highp vec4 uscale01_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage2_c1_c0_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage2_c1_c0_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c1_c0_c0_c0;
- mediump float angle;
- {
- angle = atan(-vTransformedCoords_1_Stage0.y, -vTransformedCoords_1_Stage0.x);
- }
- mediump float t = ((angle * 0.15915493667125702 + 0.5) + ubias_Stage2_c1_c0_c0_c0_c0_c0) * uscale_Stage2_c1_c0_c0_c0_c0_c0;
- _sample1099_c1_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c1_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage2_c1_c0_c0_c0_c1_c0) {
- scale = uscale01_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias01_Stage2_c1_c0_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias23_Stage2_c1_c0_c0_c0_c1_c0;
- }
- _sample1767_c1_c0_c0_c0 = t * scale + bias;
- return _sample1767_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c1_c0;
- mediump vec4 _sample1099_c1_c0_c0_c0;
- _sample1099_c1_c0_c0_c0 = stage_Stage2_c1_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c1_c0_c0_c0;
- if (t.x < 0.0) {
- child_c1_c0 = uleftBorderColor_Stage2_c1_c0_c0_c0;
- } else if (t.x > 1.0) {
- child_c1_c0 = urightBorderColor_Stage2_c1_c0_c0_c0;
- } else {
- mediump vec4 _sample1767_c1_c0_c0_c0;
- _sample1767_c1_c0_c0_c0 = stage_Stage2_c1_c0_c0_c0_c1_c0(t);
- child_c1_c0 = _sample1767_c1_c0_c0_c0;
- }
- {
- child_c1_c0.xyz *= child_c1_c0.w;
- }
- return child_c1_c0;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 child_c1_c0;
- child_c1_c0 = stage_Stage2_c1_c0_c0_c0(vec4(1.0));
- child = child_c1_c0 * _input.w;
- return child;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = vec4(child.w);
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texturew_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texturew_threshold.glsl
deleted file mode 100644
index 8ea773f..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_texturew_threshold.glsl
+++ /dev/null
@@ -1,69 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- output_Stage2 = texture2D(uTextureSampler_0_Stage2, vTransformedCoords_1_Stage0).wwww;
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_threshold.glsl
deleted file mode 100644
index 2cec90c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_threshold.glsl
+++ /dev/null
@@ -1,67 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- {
- child.xyz *= child.w;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_threshold_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_threshold_2.glsl
deleted file mode 100644
index 8335e68..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_threshold_2.glsl
+++ /dev/null
@@ -1,60 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c1_c0;
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds.glsl
deleted file mode 100644
index 7685597..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds.glsl
+++ /dev/null
@@ -1,84 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying mediump vec4 vcolor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- {
- child.xyz *= child.w;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_2.glsl
deleted file mode 100644
index a1f1060..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_2.glsl
+++ /dev/null
@@ -1,84 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- {
- child.xyz *= child.w;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_3.glsl
deleted file mode 100644
index ecc4cb4..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_3.glsl
+++ /dev/null
@@ -1,81 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_4.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_4.glsl
deleted file mode 100644
index 71ce693..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_4.glsl
+++ /dev/null
@@ -1,91 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale10_11_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias10_11_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.w) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- } else {
- {
- if (t < uthresholds9_13_Stage1_c0_c0_c1_c0.x) {
- scale = uscale8_9_Stage1_c0_c0_c1_c0;
- bias = ubias8_9_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale10_11_Stage1_c0_c0_c1_c0;
- bias = ubias10_11_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_5.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_5.glsl
deleted file mode 100644
index 4fb093a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_5.glsl
+++ /dev/null
@@ -1,77 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_6.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_6.glsl
deleted file mode 100644
index bffd97d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_6.glsl
+++ /dev/null
@@ -1,94 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale10_11_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias10_11_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.w) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- } else {
- {
- if (t < uthresholds9_13_Stage1_c0_c0_c1_c0.x) {
- scale = uscale8_9_Stage1_c0_c0_c1_c0;
- bias = ubias8_9_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale10_11_Stage1_c0_c0_c1_c0;
- bias = ubias10_11_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_7.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_7.glsl
deleted file mode 100644
index 9324401..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_7.glsl
+++ /dev/null
@@ -1,80 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- {
- _sample1992.xyz *= _sample1992.w;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_8.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_8.glsl
deleted file mode 100644
index 39b4e76..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_8.glsl
+++ /dev/null
@@ -1,86 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c0_c0_c1_c0;
- }
- } else {
- {
- scale = uscale4_5_Stage1_c0_c0_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c0_c0;
- mediump vec4 _sample1099_c0_c0_c0_c0;
- _sample1099_c0_c0_c0_c0 = stage_Stage1_c0_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0_c0_c0;
- if (t.x < 0.0) {
- child_c0_c0 = uleftBorderColor_Stage1_c0_c0_c0_c0;
- } else if (t.x > 1.0) {
- child_c0_c0 = urightBorderColor_Stage1_c0_c0_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0_c0_c0;
- _sample1767_c0_c0_c0_c0 = stage_Stage1_c0_c0_c0_c0_c1_c0(t);
- child_c0_c0 = _sample1767_c0_c0_c0_c0;
- }
- {
- child_c0_c0.xyz *= child_c0_c0.w;
- }
- return child_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 child_c0_c0;
- child_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- child = child_c0_c0 * _input.w;
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = vec4(child.w);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_9.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_9.glsl
deleted file mode 100644
index 0e27646..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_border_color_scale_thresholds_9.glsl
+++ /dev/null
@@ -1,95 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 uscale10_11_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias8_9_Stage1_c0_c0_c1_c0;
-uniform highp vec4 ubias10_11_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.w) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage1_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage1_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage1_c0_c0_c1_c0;
- bias = ubias6_7_Stage1_c0_c0_c1_c0;
- }
- }
- } else {
- {
- if (t < uthresholds9_13_Stage1_c0_c0_c1_c0.x) {
- scale = uscale8_9_Stage1_c0_c0_c1_c0;
- bias = ubias8_9_Stage1_c0_c0_c1_c0;
- } else {
- scale = uscale10_11_Stage1_c0_c0_c1_c0;
- bias = ubias10_11_Stage1_c0_c0_c1_c0;
- }
- }
- }
- _sample1767_c0_c0 = t * scale + bias;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold.glsl
deleted file mode 100644
index 482238d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold.glsl
+++ /dev/null
@@ -1,89 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uscale01_Stage1_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c1_c0_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c1_c0_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c0_c0;
- child_c0_c0 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return child_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 xfer_src;
- mediump vec4 child_c0_c0;
- child_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- xfer_src = child_c0_c0 * _input.w;
- return xfer_src;
-}
-mediump vec4 stage_Stage1_c1_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample453_c1_c0_c0_c0;
- mediump float t = vTransformedCoords_1_Stage0.x + 9.999999747378752e-06;
- _sample453_c1_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample453_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c1_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1464_c1_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c1_c0_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c1_c0_c0_c0_c1_c0;
- bias = ubias01_Stage1_c1_c0_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c1_c0_c0_c0_c1_c0;
- bias = ubias23_Stage1_c1_c0_c0_c0_c1_c0;
- }
- _sample1464_c1_c0_c0_c0 = t * scale + bias;
- return _sample1464_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c1_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992_c1_c0;
- mediump vec4 _sample453_c1_c0_c0_c0;
- _sample453_c1_c0_c0_c0 = stage_Stage1_c1_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample453_c1_c0_c0_c0;
- {
- {
- t.x = fract(t.x);
- }
- mediump vec4 _sample1464_c1_c0_c0_c0;
- _sample1464_c1_c0_c0_c0 = stage_Stage1_c1_c0_c0_c0_c1_c0(t);
- _sample1992_c1_c0 = _sample1464_c1_c0_c0_c0;
- }
- {
- _sample1992_c1_c0.xyz *= _sample1992_c1_c0.w;
- }
- return _sample1992_c1_c0;
-}
-mediump vec4 stage_Stage1_c1_c0(mediump vec4 _input) {
- mediump vec4 xfer_dst;
- mediump vec4 _sample1992_c1_c0;
- _sample1992_c1_c0 = stage_Stage1_c1_c0_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- xfer_dst = _sample1992_c1_c0;
- return xfer_dst;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 inputColor = vec4(outputColor_Stage0.xyz, 1.0);
- mediump vec4 xfer_src;
- xfer_src = stage_Stage1_c0_c0(inputColor);
- mediump vec4 xfer_dst;
- xfer_dst = stage_Stage1_c1_c0(inputColor);
- output_Stage1 = xfer_src * xfer_dst.w;
- output_Stage1 *= outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_2.glsl
deleted file mode 100644
index 563d103..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_2.glsl
+++ /dev/null
@@ -1,89 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uscale01_Stage1_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c1_c0_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c1_c0_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c0_c0;
- child_c0_c0 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return child_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 xfer_src;
- mediump vec4 child_c0_c0;
- child_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- xfer_src = child_c0_c0 * _input.w;
- return xfer_src;
-}
-mediump vec4 stage_Stage1_c1_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample453_c1_c0_c0_c0;
- mediump float t = vTransformedCoords_1_Stage0.x + 9.999999747378752e-06;
- _sample453_c1_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample453_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c1_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1464_c1_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c1_c0_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c1_c0_c0_c0_c1_c0;
- bias = ubias01_Stage1_c1_c0_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c1_c0_c0_c0_c1_c0;
- bias = ubias23_Stage1_c1_c0_c0_c0_c1_c0;
- }
- _sample1464_c1_c0_c0_c0 = t * scale + bias;
- return _sample1464_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c1_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992_c1_c0;
- mediump vec4 _sample453_c1_c0_c0_c0;
- _sample453_c1_c0_c0_c0 = stage_Stage1_c1_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample453_c1_c0_c0_c0;
- {
- {
- t.x = fract(t.x);
- }
- mediump vec4 _sample1464_c1_c0_c0_c0;
- _sample1464_c1_c0_c0_c0 = stage_Stage1_c1_c0_c0_c0_c1_c0(t);
- _sample1992_c1_c0 = _sample1464_c1_c0_c0_c0;
- }
- {
- _sample1992_c1_c0.xyz *= _sample1992_c1_c0.w;
- }
- return _sample1992_c1_c0;
-}
-mediump vec4 stage_Stage1_c1_c0(mediump vec4 _input) {
- mediump vec4 xfer_dst;
- mediump vec4 _sample1992_c1_c0;
- _sample1992_c1_c0 = stage_Stage1_c1_c0_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- xfer_dst = _sample1992_c1_c0;
- return xfer_dst;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 inputColor = vec4(outputColor_Stage0.xyz, 1.0);
- mediump vec4 xfer_src;
- xfer_src = stage_Stage1_c0_c0(inputColor);
- mediump vec4 xfer_dst;
- xfer_dst = stage_Stage1_c1_c0(inputColor);
- output_Stage1 = xfer_src * (1.0 - xfer_dst.w);
- output_Stage1 *= outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_3.glsl
deleted file mode 100644
index 37c926c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_3.glsl
+++ /dev/null
@@ -1,80 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uscale01_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage2_c1_c0_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage2_c1_c0_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample453_c1_c0_c0_c0;
- mediump float t = vTransformedCoords_1_Stage0.x + 9.999999747378752e-06;
- _sample453_c1_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample453_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1464_c1_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage2_c1_c0_c0_c0_c1_c0) {
- scale = uscale01_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias01_Stage2_c1_c0_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias23_Stage2_c1_c0_c0_c0_c1_c0;
- }
- _sample1464_c1_c0_c0_c0 = t * scale + bias;
- return _sample1464_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c1_c0;
- mediump vec4 _sample453_c1_c0_c0_c0;
- _sample453_c1_c0_c0_c0 = stage_Stage2_c1_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample453_c1_c0_c0_c0;
- {
- {
- t.x = fract(t.x);
- }
- mediump vec4 _sample1464_c1_c0_c0_c0;
- _sample1464_c1_c0_c0_c0 = stage_Stage2_c1_c0_c0_c0_c1_c0(t);
- child_c1_c0 = _sample1464_c1_c0_c0_c0;
- }
- {
- child_c1_c0.xyz *= child_c1_c0.w;
- }
- return child_c1_c0;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 child_c1_c0;
- child_c1_c0 = stage_Stage2_c1_c0_c0_c0(vec4(1.0));
- child = child_c1_c0 * _input.w;
- return child;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = vec4(child.w);
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_4.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_4.glsl
deleted file mode 100644
index aa9cd37..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_threshold_4.glsl
+++ /dev/null
@@ -1,71 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uscale01_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias01_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale23_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias23_Stage1_c0_c0_c0_c0_c1_c0;
-uniform mediump float uthreshold_Stage1_c0_c0_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample453_c0_c0_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample453_c0_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample453_c0_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1464_c0_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- if (t < uthreshold_Stage1_c0_c0_c0_c0_c1_c0) {
- scale = uscale01_Stage1_c0_c0_c0_c0_c1_c0;
- bias = ubias01_Stage1_c0_c0_c0_c0_c1_c0;
- } else {
- scale = uscale23_Stage1_c0_c0_c0_c0_c1_c0;
- bias = ubias23_Stage1_c0_c0_c0_c0_c1_c0;
- }
- _sample1464_c0_c0_c0_c0 = t * scale + bias;
- return _sample1464_c0_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c0_c0;
- mediump vec4 _sample453_c0_c0_c0_c0;
- _sample453_c0_c0_c0_c0 = stage_Stage1_c0_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample453_c0_c0_c0_c0;
- {
- {
- t.x = fract(t.x);
- }
- mediump vec4 _sample1464_c0_c0_c0_c0;
- _sample1464_c0_c0_c0_c0 = stage_Stage1_c0_c0_c0_c0_c1_c0(t);
- child_c0_c0 = _sample1464_c0_c0_c0_c0;
- }
- {
- child_c0_c0.xyz *= child_c0_c0.w;
- }
- return child_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 child_c0_c0;
- child_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- child = child_c0_c0 * _input.w;
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = vec4(child.w);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_thresholds.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_thresholds.glsl
deleted file mode 100644
index 5eedd12..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_thresholds.glsl
+++ /dev/null
@@ -1,92 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uscale0_1_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage2_c1_c0_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage2_c1_c0_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage2_c1_c0_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample453_c1_c0_c0_c0;
- mediump float t = vTransformedCoords_1_Stage0.x + 9.999999747378752e-06;
- _sample453_c1_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample453_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1464_c1_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage2_c1_c0_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage2_c1_c0_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias0_1_Stage2_c1_c0_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias2_3_Stage2_c1_c0_c0_c0_c1_c0;
- }
- } else {
- {
- scale = uscale4_5_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias4_5_Stage2_c1_c0_c0_c0_c1_c0;
- }
- }
- }
- _sample1464_c1_c0_c0_c0 = t * scale + bias;
- return _sample1464_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c1_c0;
- mediump vec4 _sample453_c1_c0_c0_c0;
- _sample453_c1_c0_c0_c0 = stage_Stage2_c1_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample453_c1_c0_c0_c0;
- {
- {
- t.x = fract(t.x);
- }
- mediump vec4 _sample1464_c1_c0_c0_c0;
- _sample1464_c1_c0_c0_c0 = stage_Stage2_c1_c0_c0_c0_c1_c0(t);
- child_c1_c0 = _sample1464_c1_c0_c0_c0;
- }
- {
- child_c1_c0.xyz *= child_c1_c0.w;
- }
- return child_c1_c0;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 child_c1_c0;
- child_c1_c0 = stage_Stage2_c1_c0_c0_c0(vec4(1.0));
- child = child_c1_c0 * _input.w;
- return child;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = vec4(child.w);
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_thresholds_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_thresholds_2.glsl
deleted file mode 100644
index ddacd34..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_texture_thresholds_2.glsl
+++ /dev/null
@@ -1,97 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uscale0_1_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale6_7_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage2_c1_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias6_7_Stage2_c1_c0_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage2_c1_c0_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage2_c1_c0_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample453_c1_c0_c0_c0;
- mediump float t = vTransformedCoords_1_Stage0.x + 9.999999747378752e-06;
- _sample453_c1_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample453_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1464_c1_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage2_c1_c0_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage2_c1_c0_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias0_1_Stage2_c1_c0_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias2_3_Stage2_c1_c0_c0_c0_c1_c0;
- }
- } else {
- if (t < uthresholds1_7_Stage2_c1_c0_c0_c0_c1_c0.z) {
- scale = uscale4_5_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias4_5_Stage2_c1_c0_c0_c0_c1_c0;
- } else {
- scale = uscale6_7_Stage2_c1_c0_c0_c0_c1_c0;
- bias = ubias6_7_Stage2_c1_c0_c0_c0_c1_c0;
- }
- }
- }
- _sample1464_c1_c0_c0_c0 = t * scale + bias;
- return _sample1464_c1_c0_c0_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c1_c0;
- mediump vec4 _sample453_c1_c0_c0_c0;
- _sample453_c1_c0_c0_c0 = stage_Stage2_c1_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample453_c1_c0_c0_c0;
- {
- {
- t.x = fract(t.x);
- }
- mediump vec4 _sample1464_c1_c0_c0_c0;
- _sample1464_c1_c0_c0_c0 = stage_Stage2_c1_c0_c0_c0_c1_c0(t);
- child_c1_c0 = _sample1464_c1_c0_c0_c0;
- }
- {
- child_c1_c0.xyz *= child_c1_c0.w;
- }
- return child_c1_c0;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 child_c1_c0;
- child_c1_c0 = stage_Stage2_c1_c0_c0_c0(vec4(1.0));
- child = child_c1_c0 * _input.w;
- return child;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = vec4(child.w);
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_thresholds.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_thresholds.glsl
deleted file mode 100644
index 8dd97f4..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bias_color_scale_thresholds.glsl
+++ /dev/null
@@ -1,83 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uscale0_1_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale2_3_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 uscale4_5_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias0_1_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias2_3_Stage1_c0_c0_c0_c0_c1_c0;
-uniform highp vec4 ubias4_5_Stage1_c0_c0_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds1_7_Stage1_c0_c0_c0_c0_c1_c0;
-uniform mediump vec4 uthresholds9_13_Stage1_c0_c0_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample453_c0_c0_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample453_c0_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample453_c0_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1464_c0_c0_c0_c0;
- mediump float t = _input.x;
- highp vec4 scale, bias;
- {
- if (t < uthresholds1_7_Stage1_c0_c0_c0_c0_c1_c0.y) {
- if (t < uthresholds1_7_Stage1_c0_c0_c0_c0_c1_c0.x) {
- scale = uscale0_1_Stage1_c0_c0_c0_c0_c1_c0;
- bias = ubias0_1_Stage1_c0_c0_c0_c0_c1_c0;
- } else {
- scale = uscale2_3_Stage1_c0_c0_c0_c0_c1_c0;
- bias = ubias2_3_Stage1_c0_c0_c0_c0_c1_c0;
- }
- } else {
- {
- scale = uscale4_5_Stage1_c0_c0_c0_c0_c1_c0;
- bias = ubias4_5_Stage1_c0_c0_c0_c0_c1_c0;
- }
- }
- }
- _sample1464_c0_c0_c0_c0 = t * scale + bias;
- return _sample1464_c0_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c0_c0;
- mediump vec4 _sample453_c0_c0_c0_c0;
- _sample453_c0_c0_c0_c0 = stage_Stage1_c0_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample453_c0_c0_c0_c0;
- {
- {
- t.x = fract(t.x);
- }
- mediump vec4 _sample1464_c0_c0_c0_c0;
- _sample1464_c0_c0_c0_c0 = stage_Stage1_c0_c0_c0_c0_c1_c0(t);
- child_c0_c0 = _sample1464_c0_c0_c0_c0;
- }
- {
- child_c0_c0.xyz *= child_c0_c0.w;
- }
- return child_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 child_c0_c0;
- child_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- child = child_c0_c0 * _input.w;
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = vec4(child.w);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_circle_color.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_circle_color.glsl
deleted file mode 100644
index 77e95c3..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_circle_color.glsl
+++ /dev/null
@@ -1,61 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- outputColor_Stage0 = vinColor_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color.glsl
deleted file mode 100644
index d0e6ce0..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color.glsl
+++ /dev/null
@@ -1,49 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_2.glsl
deleted file mode 100644
index 8f365d1..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_2.glsl
+++ /dev/null
@@ -1,49 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_3.glsl
deleted file mode 100644
index 4cf129e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_3.glsl
+++ /dev/null
@@ -1,53 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_coverage.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_coverage.glsl
deleted file mode 100644
index 6adacd9..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_coverage.glsl
+++ /dev/null
@@ -1,55 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- highp float coverage = vcoverage_Stage0;
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_coverage_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_coverage_texture.glsl
deleted file mode 100644
index 07a5ac3..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_coverage_texture.glsl
+++ /dev/null
@@ -1,54 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump vec2 coord = vec2(_input.x, 0.5);
- _sample1767_c0_c0 = texture2D(uTextureSampler_0_Stage1, coord);
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- highp float coverage = vcoverage_Stage0;
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_decal_ellipse_scaletranslate_texdom_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_decal_ellipse_scaletranslate_texdom_texturew.glsl
deleted file mode 100644
index 70771b9..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_decal_ellipse_scaletranslate_texdom_texturew.glsl
+++ /dev/null
@@ -1,84 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uscaleAndTranslate_Stage2;
-uniform mediump vec4 uTexDom_Stage2;
-uniform mediump vec3 uDecalParams_Stage2;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec3 vEllipseOffsets_Stage0;
-varying highp vec4 vEllipseRadii_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec2 offset = vEllipseOffsets_Stage0.xy;
- highp float test = dot(offset, offset) - 1.0;
- highp vec2 grad = (2.0 * offset) * (vEllipseOffsets_Stage0.z * vEllipseRadii_Stage0.xy);
- highp float grad_dot = dot(grad, grad);
- grad_dot = max(grad_dot, 6.103600026108325e-05);
- highp float invlen = vEllipseOffsets_Stage0.z * inversesqrt(grad_dot);
- highp float edgeAlpha = clamp(0.5 - test * invlen, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage2.xy + uscaleAndTranslate_Stage2.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage2.xy, uTexDom_Stage2.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage2, clampedCoord).wwww * outputCoverage_Stage0;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage2.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage2.y);
- if (err > uDecalParams_Stage2.z) {
- err = 1.0;
- } else if (uDecalParams_Stage2.z < 1.0) {
- err = 0.0;
- }
- output_Stage2 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_quad_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_quad_texture.glsl
deleted file mode 100644
index 3c26f70..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_quad_texture.glsl
+++ /dev/null
@@ -1,64 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump vec2 coord = vec2(_input.x, 0.5);
- _sample1767_c0_c0 = texture2D(uTextureSampler_0_Stage1, coord);
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_quad_texture_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_quad_texture_2.glsl
deleted file mode 100644
index 298ccc0..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_quad_texture_2.glsl
+++ /dev/null
@@ -1,66 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump vec2 coord = vec2(_input.x, 0.5);
- _sample1767_c0_c0 = texture2D(uTextureSampler_0_Stage1, coord);
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texindex_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texindex_texturew.glsl
deleted file mode 100644
index 92cba89..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texindex_texturew.glsl
+++ /dev/null
@@ -1,61 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform sampler2D uTextureSampler_0_Stage0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump vec2 coord = vec2(_input.x, 0.5);
- _sample1767_c0_c0 = texture2D(uTextureSampler_0_Stage1, coord);
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texture.glsl
deleted file mode 100644
index 6f7f457..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texture.glsl
+++ /dev/null
@@ -1,48 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump vec2 coord = vec2(_input.x, 0.5);
- _sample1767_c0_c0 = texture2D(uTextureSampler_0_Stage1, coord);
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texture_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texture_2.glsl
deleted file mode 100644
index edebf92..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texture_2.glsl
+++ /dev/null
@@ -1,52 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump vec2 coord = vec2(_input.x, 0.5);
- _sample1767_c0_c0 = texture2D(uTextureSampler_0_Stage1, coord);
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew.glsl
deleted file mode 100644
index df8be8f..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew.glsl
+++ /dev/null
@@ -1,55 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- output_Stage2 = texture2D(uTextureSampler_0_Stage2, vTransformedCoords_1_Stage0).wwww;
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew_2.glsl
deleted file mode 100644
index 78b3466..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew_2.glsl
+++ /dev/null
@@ -1,55 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- _sample1992 = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- _sample1992 = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- _sample1992 = _sample1767_c0_c0;
- }
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- output_Stage2 = texture2D(uTextureSampler_0_Stage2, vTransformedCoords_1_Stage0).wwww;
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew_3.glsl
deleted file mode 100644
index 4b409d6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_border_color_texturew_3.glsl
+++ /dev/null
@@ -1,59 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uleftBorderColor_Stage1_c0_c0;
-uniform mediump vec4 urightBorderColor_Stage1_c0_c0;
-uniform mediump vec4 ustart_Stage1_c0_c0_c1_c0;
-uniform mediump vec4 uend_Stage1_c0_c0_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1099_c0_c0;
- mediump float t = length(vTransformedCoords_0_Stage0);
- _sample1099_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample1099_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1767_c0_c0;
- mediump float t = _input.x;
- _sample1767_c0_c0 = (1.0 - t) * ustart_Stage1_c0_c0_c1_c0 + t * uend_Stage1_c0_c0_c1_c0;
- return _sample1767_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 _sample1099_c0_c0;
- _sample1099_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample1099_c0_c0;
- if (t.x < 0.0) {
- child = uleftBorderColor_Stage1_c0_c0;
- } else if (t.x > 1.0) {
- child = urightBorderColor_Stage1_c0_c0;
- } else {
- mediump vec4 _sample1767_c0_c0;
- _sample1767_c0_c0 = stage_Stage1_c0_c0_c1_c0(t);
- child = _sample1767_c0_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- mediump vec4 output_Stage2;
- {
- output_Stage2 = texture2D(uTextureSampler_0_Stage2, vTransformedCoords_1_Stage0).wwww;
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture.glsl
deleted file mode 100644
index 42c9f0f..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture.glsl
+++ /dev/null
@@ -1,145 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uImageIncrement_Stage1;
-uniform mediump vec2 uBounds_Stage1;
-uniform mediump vec4 uKernel_Stage1[7];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- output_Stage1 = vec4(0.0, 0.0, 0.0, 0.0);
- highp vec2 coord = vTransformedCoords_0_Stage0 - 12.0 * uImageIncrement_Stage1;
- highp vec2 coordSampled = vec2(0.0, 0.0);
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.x >= uBounds_Stage1.x && coord.x <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[6].x;
- }
- coord += uImageIncrement_Stage1;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_2.glsl
deleted file mode 100644
index f3743d7..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_2.glsl
+++ /dev/null
@@ -1,145 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uImageIncrement_Stage1;
-uniform mediump vec2 uBounds_Stage1;
-uniform mediump vec4 uKernel_Stage1[7];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- output_Stage1 = vec4(0.0, 0.0, 0.0, 0.0);
- highp vec2 coord = vTransformedCoords_0_Stage0 - 12.0 * uImageIncrement_Stage1;
- highp vec2 coordSampled = vec2(0.0, 0.0);
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].x;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].y;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].z;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].w;
- }
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- if (coord.y >= uBounds_Stage1.x && coord.y <= uBounds_Stage1.y) {
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[6].x;
- }
- coord += uImageIncrement_Stage1;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_3.glsl
deleted file mode 100644
index 4285cdf..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_3.glsl
+++ /dev/null
@@ -1,120 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uImageIncrement_Stage1;
-uniform mediump vec2 uBounds_Stage1;
-uniform mediump vec4 uKernel_Stage1[7];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- output_Stage1 = vec4(0.0, 0.0, 0.0, 0.0);
- highp vec2 coord = vTransformedCoords_0_Stage0 - 12.0 * uImageIncrement_Stage1;
- highp vec2 coordSampled = vec2(0.0, 0.0);
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.y = clamp(coord.y, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[6].x;
- coord += uImageIncrement_Stage1;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_4.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_4.glsl
deleted file mode 100644
index 768ef3a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_bounds_increment_kernel_texture_4.glsl
+++ /dev/null
@@ -1,120 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uImageIncrement_Stage1;
-uniform mediump vec2 uBounds_Stage1;
-uniform mediump vec4 uKernel_Stage1[7];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- output_Stage1 = vec4(0.0, 0.0, 0.0, 0.0);
- highp vec2 coord = vTransformedCoords_0_Stage0 - 12.0 * uImageIncrement_Stage1;
- highp vec2 coordSampled = vec2(0.0, 0.0);
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- coordSampled.x = clamp(coord.x, uBounds_Stage1.x, uBounds_Stage1.y);
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[6].x;
- coord += uImageIncrement_Stage1;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color.glsl
deleted file mode 100644
index 4ded4d6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color.glsl
+++ /dev/null
@@ -1,25 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- outputColor_Stage0 = vinColor_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- mediump float distanceToInnerEdge = circleEdge.z * (d - circleEdge.w);
- mediump float innerAlpha = clamp(distanceToInnerEdge, 0.0, 1.0);
- edgeAlpha *= innerAlpha;
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_2.glsl
deleted file mode 100644
index a91964d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_2.glsl
+++ /dev/null
@@ -1,22 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- outputColor_Stage0 = vinColor_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_edges.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_edges.glsl
deleted file mode 100644
index 450d975..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_edges.glsl
+++ /dev/null
@@ -1,43 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[4];
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- outputColor_Stage0 = vinColor_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_texture.glsl
deleted file mode 100644
index eff113e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_color_texture.glsl
+++ /dev/null
@@ -1,35 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uDstTextureUpperLeft_Stage1;
-uniform mediump vec2 uDstTextureCoordScale_Stage1;
-uniform sampler2D uDstTextureSampler_Stage1;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- highp vec4 circleEdge;
- circleEdge = vinCircleEdge_Stage0;
- outputColor_Stage0 = vinColor_Stage0;
- highp float d = length(circleEdge.xy);
- mediump float distanceToOuterEdge = circleEdge.z * (1.0 - d);
- mediump float edgeAlpha = clamp(distanceToOuterEdge, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- if (all(lessThanEqual(outputCoverage_Stage0.xyz, vec3(0.0)))) {
- discard;
- }
- mediump vec2 _dstTexCoord = (sk_FragCoord.xy - uDstTextureUpperLeft_Stage1) * uDstTextureCoordScale_Stage1;
- _dstTexCoord.y = 1.0 - _dstTexCoord.y;
- mediump vec4 _dstColor = texture2D(uDstTextureSampler_Stage1, _dstTexCoord);
- gl_FragColor.w = outputColor_Stage0.w + (1.0 - outputColor_Stage0.w) * _dstColor.w;
- gl_FragColor.xyz = ((1.0 - outputColor_Stage0.w) * _dstColor.xyz + (1.0 - _dstColor.w) * outputColor_Stage0.xyz) + outputColor_Stage0.xyz * _dstColor.xyz;
- gl_FragColor = outputCoverage_Stage0 * gl_FragColor + (vec4(1.0) - outputCoverage_Stage0) * _dstColor;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_texture.glsl
deleted file mode 100644
index 87b7c3e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_circle_texture.glsl
+++ /dev/null
@@ -1,31 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 ucircle_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vec4(1.0);
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord);
- }
- mediump vec4 output_Stage1;
- {
- mediump float d;
- {
- d = (1.0 - length((ucircle_Stage1.xy - sk_FragCoord.xy) * ucircle_Stage1.w)) * ucircle_Stage1.z;
- }
- {
- output_Stage1 = vec4(clamp(d, 0.0, 1.0));
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color.glsl
deleted file mode 100644
index 7ad14e0..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color.glsl
+++ /dev/null
@@ -1,14 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- {
- gl_FragColor = outputColor_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_2.glsl
deleted file mode 100644
index 0211cf4..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_2.glsl
+++ /dev/null
@@ -1,14 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- }
- {
- gl_FragColor = outputColor_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage.glsl
deleted file mode 100644
index 7272bcb..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage.glsl
+++ /dev/null
@@ -1,19 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-varying mediump float vinCoverage_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump float alpha = 1.0;
- alpha = vinCoverage_Stage0;
- outputCoverage_Stage0 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_decal_hairquad_scaletranslate_texdom_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_decal_hairquad_scaletranslate_texdom_texturew.glsl
deleted file mode 100644
index cb09752..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_decal_hairquad_scaletranslate_texdom_texturew.glsl
+++ /dev/null
@@ -1,48 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump float uCoverage_Stage0;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vHairQuadEdge_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vHairQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vHairQuadEdge_Stage0.xy);
- mediump vec2 gF = vec2((2.0 * vHairQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vHairQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vHairQuadEdge_Stage0.x * vHairQuadEdge_Stage0.x - vHairQuadEdge_Stage0.y;
- edgeAlpha = sqrt((edgeAlpha * edgeAlpha) / dot(gF, gF));
- edgeAlpha = max(1.0 - edgeAlpha, 0.0);
- outputCoverage_Stage0 = vec4(uCoverage_Stage0 * edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww * outputCoverage_Stage0;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_decal_scaletranslate_texdom_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_decal_scaletranslate_texdom_texturew.glsl
deleted file mode 100644
index d8d359e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_decal_scaletranslate_texdom_texturew.glsl
+++ /dev/null
@@ -1,41 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump float vinCoverage_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump float alpha = 1.0;
- alpha = vinCoverage_Stage0;
- outputCoverage_Stage0 = vec4(alpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww * outputCoverage_Stage0;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges.glsl
deleted file mode 100644
index 2a9dfce..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges.glsl
+++ /dev/null
@@ -1,40 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump vec3 uedges_Stage1[4];
-varying mediump float vinCoverage_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump float alpha = 1.0;
- alpha = vinCoverage_Stage0;
- outputCoverage_Stage0 = vec4(alpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges_geomdomain.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges_geomdomain.glsl
deleted file mode 100644
index 8b100d2..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges_geomdomain.glsl
+++ /dev/null
@@ -1,47 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[4];
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-varying highp vec4 vgeomDomain_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- highp float coverage = vcoverage_Stage0 * sk_FragCoord.w;
- highp vec4 geoDomain;
- geoDomain = vgeomDomain_Stage0;
- if (coverage < 0.5) {
- highp vec4 dists4 = clamp(vec4(1.0, 1.0, -1.0, -1.0) * (sk_FragCoord.xyxy - geoDomain), 0.0, 1.0);
- highp vec2 dists2 = dists4.xy * dists4.zw;
- coverage = min(coverage, dists2.x * dists2.y);
- }
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges_hairquad.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges_hairquad.glsl
deleted file mode 100644
index 43b519a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_edges_hairquad.glsl
+++ /dev/null
@@ -1,47 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump float uCoverage_Stage0;
-uniform mediump vec3 uedges_Stage1[4];
-varying mediump vec4 vHairQuadEdge_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vHairQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vHairQuadEdge_Stage0.xy);
- mediump vec2 gF = vec2((2.0 * vHairQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vHairQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vHairQuadEdge_Stage0.x * vHairQuadEdge_Stage0.x - vHairQuadEdge_Stage0.y;
- edgeAlpha = sqrt((edgeAlpha * edgeAlpha) / dot(gF, gF));
- edgeAlpha = max(1.0 - edgeAlpha, 0.0);
- outputCoverage_Stage0 = vec4(uCoverage_Stage0 * edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_geomdomain.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_geomdomain.glsl
deleted file mode 100644
index 50d9a4e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_geomdomain.glsl
+++ /dev/null
@@ -1,28 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-varying highp vec4 vgeomDomain_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- highp float coverage = vcoverage_Stage0;
- highp vec4 geoDomain;
- geoDomain = vgeomDomain_Stage0;
- if (coverage < 0.5) {
- highp vec4 dists4 = clamp(vec4(1.0, 1.0, -1.0, -1.0) * (sk_FragCoord.xyxy - geoDomain), 0.0, 1.0);
- highp vec2 dists2 = dists4.xy * dists4.zw;
- coverage = min(coverage, dists2.x * dists2.y);
- }
- outputCoverage_Stage0 = vec4(coverage);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_geomdomain_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_geomdomain_2.glsl
deleted file mode 100644
index ef5504c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_geomdomain_2.glsl
+++ /dev/null
@@ -1,28 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-varying highp vec4 vgeomDomain_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- highp float coverage = vcoverage_Stage0 * sk_FragCoord.w;
- highp vec4 geoDomain;
- geoDomain = vgeomDomain_Stage0;
- if (coverage < 0.5) {
- highp vec4 dists4 = clamp(vec4(1.0, 1.0, -1.0, -1.0) * (sk_FragCoord.xyxy - geoDomain), 0.0, 1.0);
- highp vec2 dists2 = dists4.xy * dists4.zw;
- coverage = min(coverage, dists2.x * dists2.y);
- }
- outputCoverage_Stage0 = vec4(coverage);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_hairquad.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_hairquad.glsl
deleted file mode 100644
index 894f33a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_hairquad.glsl
+++ /dev/null
@@ -1,26 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump float uCoverage_Stage0;
-varying mediump vec4 vHairQuadEdge_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vHairQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vHairQuadEdge_Stage0.xy);
- mediump vec2 gF = vec2((2.0 * vHairQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vHairQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vHairQuadEdge_Stage0.x * vHairQuadEdge_Stage0.x - vHairQuadEdge_Stage0.y;
- edgeAlpha = sqrt((edgeAlpha * edgeAlpha) / dot(gF, gF));
- edgeAlpha = max(1.0 - edgeAlpha, 0.0);
- outputCoverage_Stage0 = vec4(uCoverage_Stage0 * edgeAlpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_texdom_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_texdom_texture.glsl
deleted file mode 100644
index 76979ee..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_texdom_texture.glsl
+++ /dev/null
@@ -1,49 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uTexDom_Stage1_c0_c0;
-uniform mediump vec4 ucolor_Stage2_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- {
- highp vec2 origCoord = vTransformedCoords_0_Stage0;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1_c0_c0.xy, uTexDom_Stage1_c0_c0.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord) * _input;
- _sample1992 = inside;
- }
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- {
- child = ucolor_Stage2_c1_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- highp float coverage = vcoverage_Stage0;
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = child * output_Stage1.w;
- }
- {
- gl_FragColor = output_Stage2 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_texture.glsl
deleted file mode 100644
index b4fbafc..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_coverage_texture.glsl
+++ /dev/null
@@ -1,29 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-void main() {
- mediump vec4 outputCoverage_Stage0;
- {
- highp float coverage = vcoverage_Stage0;
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_quad_scaletranslate_texdom_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_quad_scaletranslate_texdom_texturew.glsl
deleted file mode 100644
index 7bc3642..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_quad_scaletranslate_texdom_texturew.glsl
+++ /dev/null
@@ -1,69 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform mediump vec3 uedges_Stage2[4];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww * outputCoverage_Stage0;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- mediump vec4 output_Stage2;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage2[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage2 = output_Stage1 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew.glsl
deleted file mode 100644
index 8f976a2..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew.glsl
+++ /dev/null
@@ -1,52 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform mediump vec3 uedges_Stage2[3];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- mediump vec4 output_Stage2;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage2[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage2 = output_Stage1 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew_2.glsl
deleted file mode 100644
index ede7578..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew_2.glsl
+++ /dev/null
@@ -1,61 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage2;
-uniform mediump vec4 uTexDom_Stage2;
-uniform mediump vec3 uDecalParams_Stage2;
-uniform mediump vec3 uedges_Stage3[4];
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0).wwww;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage2.xy + uscaleAndTranslate_Stage2.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage2.xy, uTexDom_Stage2.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage2, clampedCoord).wwww * output_Stage1;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage2.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage2.y);
- if (err > uDecalParams_Stage2.z) {
- err = 1.0;
- } else if (uDecalParams_Stage2.z < 1.0) {
- err = 0.0;
- }
- output_Stage2 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- mediump vec4 output_Stage3;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage3[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage3[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage3[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage3[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage3 = output_Stage2 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage3;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew_3.glsl
deleted file mode 100644
index 53738e5..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_edges_scaletranslate_texdom_texturew_3.glsl
+++ /dev/null
@@ -1,55 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform mediump vec3 uedges_Stage2[4];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- mediump vec4 output_Stage2;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage2[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage2 = output_Stage1 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_ellipse_scaletranslate_texdom_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_ellipse_scaletranslate_texdom_texture.glsl
deleted file mode 100644
index b1749e3..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_ellipse_scaletranslate_texdom_texture.glsl
+++ /dev/null
@@ -1,55 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec3 vEllipseOffsets_Stage0;
-varying highp vec4 vEllipseRadii_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 offset = vEllipseOffsets_Stage0.xy;
- offset *= vEllipseRadii_Stage0.xy;
- highp float test = dot(offset, offset) - 1.0;
- highp vec2 grad = (2.0 * offset) * (vEllipseOffsets_Stage0.z * vEllipseRadii_Stage0.xy);
- highp float grad_dot = dot(grad, grad);
- grad_dot = max(grad_dot, 6.103600026108325e-05);
- highp float invlen = vEllipseOffsets_Stage0.z * inversesqrt(grad_dot);
- highp float edgeAlpha = clamp(0.5 - test * invlen, 0.0, 1.0);
- offset = vEllipseOffsets_Stage0.xy * vEllipseRadii_Stage0.zw;
- test = dot(offset, offset) - 1.0;
- grad = (2.0 * offset) * (vEllipseOffsets_Stage0.z * vEllipseRadii_Stage0.zw);
- grad_dot = dot(grad, grad);
- grad_dot = max(grad_dot, 6.103600026108325e-05);
- invlen = vEllipseOffsets_Stage0.z * inversesqrt(grad_dot);
- edgeAlpha *= clamp(0.5 + test * invlen, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww * outputCoverage_Stage0;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_quad_scaletranslate_texdom_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_quad_scaletranslate_texdom_texturew.glsl
deleted file mode 100644
index a8c8198..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_quad_scaletranslate_texdom_texturew.glsl
+++ /dev/null
@@ -1,50 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww * outputCoverage_Stage0;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texindex_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texindex_texturew.glsl
deleted file mode 100644
index 32b5458..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texindex_texturew.glsl
+++ /dev/null
@@ -1,45 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww * outputCoverage_Stage0;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew.glsl
deleted file mode 100644
index eaca17b..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew.glsl
+++ /dev/null
@@ -1,36 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew_2.glsl
deleted file mode 100644
index 4092ef8..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew_2.glsl
+++ /dev/null
@@ -1,45 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage2;
-uniform mediump vec4 uTexDom_Stage2;
-uniform mediump vec3 uDecalParams_Stage2;
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage2.xy + uscaleAndTranslate_Stage2.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage2.xy, uTexDom_Stage2.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage2, clampedCoord).wwww;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage2.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage2.y);
- if (err > uDecalParams_Stage2.z) {
- err = 1.0;
- } else if (uDecalParams_Stage2.z < 1.0) {
- err = 0.0;
- }
- output_Stage2 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew_3.glsl
deleted file mode 100644
index 2845edf..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_decal_scaletranslate_texdom_texturew_3.glsl
+++ /dev/null
@@ -1,42 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage2;
-uniform mediump vec4 uTexDom_Stage2;
-uniform mediump vec3 uDecalParams_Stage2;
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0).wwww;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage2.xy + uscaleAndTranslate_Stage2.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage2.xy, uTexDom_Stage2.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage2, clampedCoord).wwww * output_Stage1;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage2.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage2.y);
- if (err > uDecalParams_Stage2.z) {
- err = 1.0;
- } else if (uDecalParams_Stage2.z < 1.0) {
- err = 0.0;
- }
- output_Stage2 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges.glsl
deleted file mode 100644
index ec0b725..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges.glsl
+++ /dev/null
@@ -1,32 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[3];
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_2.glsl
deleted file mode 100644
index 40b5af2..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_2.glsl
+++ /dev/null
@@ -1,35 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[4];
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad.glsl
deleted file mode 100644
index e9945c7..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad.glsl
+++ /dev/null
@@ -1,49 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[4];
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad_2.glsl
deleted file mode 100644
index fab5012..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad_2.glsl
+++ /dev/null
@@ -1,52 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[5];
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[4], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad_3.glsl
deleted file mode 100644
index bae399a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_quad_3.glsl
+++ /dev/null
@@ -1,46 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[3];
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew.glsl
deleted file mode 100644
index 252fad4..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew.glsl
+++ /dev/null
@@ -1,44 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[4];
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew_2.glsl
deleted file mode 100644
index fda3d24..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew_2.glsl
+++ /dev/null
@@ -1,51 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[4];
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 uv = vTextureCoords_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, uv).wwww;
- }
- mediump float distance = 7.96875 * (texColor.x - 0.501960813999176);
- mediump float afwidth;
- afwidth = abs(0.6499999761581421 * -dFdy(vIntTextureCoords_Stage0.y));
- mediump float val = smoothstep(-afwidth, afwidth, distance);
- outputCoverage_Stage0 = vec4(val);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew_3.glsl
deleted file mode 100644
index 837a27d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texindex_texturew_3.glsl
+++ /dev/null
@@ -1,53 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[7];
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[4], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[5], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage1[6], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture.glsl
deleted file mode 100644
index 72554a1..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture.glsl
+++ /dev/null
@@ -1,40 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[4];
-uniform sampler2D uTextureSampler_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord) * outputColor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = edge >= 0.5 ? 1.0 : 0.0;
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = edge >= 0.5 ? 1.0 : 0.0;
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = edge >= 0.5 ? 1.0 : 0.0;
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = edge >= 0.5 ? 1.0 : 0.0;
- alpha *= edge;
- output_Stage1 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture_2.glsl
deleted file mode 100644
index 67c3c03..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture_2.glsl
+++ /dev/null
@@ -1,48 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage2[4];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- child = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return child;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- mediump vec4 output_Stage2;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage2[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage2 = vec4(alpha);
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture_3.glsl
deleted file mode 100644
index b45b6ef..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texture_3.glsl
+++ /dev/null
@@ -1,62 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 ucolor_Stage2_c1_c0;
-uniform mediump vec3 uedges_Stage3[4];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- child = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return child;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- {
- child = ucolor_Stage2_c1_c0;
- }
- return child;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = child * output_Stage1.w;
- }
- mediump vec4 output_Stage3;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage3[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage3[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage3[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage3[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage3 = vec4(alpha);
- }
- {
- gl_FragColor = output_Stage2 * output_Stage3;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texturew.glsl
deleted file mode 100644
index 01a9f50..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texturew.glsl
+++ /dev/null
@@ -1,44 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage2[5];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0).wwww;
- }
- mediump vec4 output_Stage2;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage2[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[4], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage2 = output_Stage1 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texturew_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texturew_2.glsl
deleted file mode 100644
index 6d63e22..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_edges_texturew_2.glsl
+++ /dev/null
@@ -1,42 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage2[4];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec3 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- highp vec2 vTransformedCoords_0_Stage0_ensure2D = vTransformedCoords_0_Stage0.xy / vTransformedCoords_0_Stage0.z;
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0_ensure2D).wwww;
- }
- mediump vec4 output_Stage2;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage2[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage2 = output_Stage1 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse.glsl
deleted file mode 100644
index 8b55db4..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse.glsl
+++ /dev/null
@@ -1,25 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-varying highp vec3 vEllipseOffsets_Stage0;
-varying highp vec4 vEllipseRadii_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 offset = vEllipseOffsets_Stage0.xy;
- highp float test = dot(offset, offset) - 1.0;
- highp vec2 grad = (2.0 * offset) * (vEllipseOffsets_Stage0.z * vEllipseRadii_Stage0.xy);
- highp float grad_dot = dot(grad, grad);
- grad_dot = max(grad_dot, 6.103600026108325e-05);
- highp float invlen = vEllipseOffsets_Stage0.z * inversesqrt(grad_dot);
- highp float edgeAlpha = clamp(0.5 - test * invlen, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_2.glsl
deleted file mode 100644
index dd2801e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_2.glsl
+++ /dev/null
@@ -1,41 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-varying highp vec3 vEllipseOffsets0_Stage0;
-varying highp vec2 vEllipseOffsets1_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 scaledOffset = vEllipseOffsets0_Stage0.xy;
- highp float test = dot(scaledOffset, scaledOffset) - 1.0;
- highp vec2 duvdx = dFdx(vEllipseOffsets0_Stage0.xy);
- highp vec2 duvdy = -dFdy(vEllipseOffsets0_Stage0.xy);
- highp vec2 grad = vec2(vEllipseOffsets0_Stage0.x * duvdx.x + vEllipseOffsets0_Stage0.y * duvdx.y, vEllipseOffsets0_Stage0.x * duvdy.x + vEllipseOffsets0_Stage0.y * duvdy.y);
- grad *= vEllipseOffsets0_Stage0.z;
- highp float grad_dot = 4.0 * dot(grad, grad);
- grad_dot = max(grad_dot, 6.103600026108325e-05);
- highp float invlen = inversesqrt(grad_dot);
- invlen *= vEllipseOffsets0_Stage0.z;
- highp float edgeAlpha = clamp(0.5 - test * invlen, 0.0, 1.0);
- scaledOffset = vEllipseOffsets1_Stage0;
- test = dot(scaledOffset, scaledOffset) - 1.0;
- duvdx = dFdx(vEllipseOffsets1_Stage0);
- duvdy = -dFdy(vEllipseOffsets1_Stage0);
- grad = vec2(vEllipseOffsets1_Stage0.x * duvdx.x + vEllipseOffsets1_Stage0.y * duvdx.y, vEllipseOffsets1_Stage0.x * duvdy.x + vEllipseOffsets1_Stage0.y * duvdy.y);
- grad *= vEllipseOffsets0_Stage0.z;
- grad_dot = 4.0 * dot(grad, grad);
- grad_dot = max(grad_dot, 6.103600026108325e-05);
- invlen = inversesqrt(grad_dot);
- invlen *= vEllipseOffsets0_Stage0.z;
- edgeAlpha *= clamp(0.5 + test * invlen, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_3.glsl
deleted file mode 100644
index ff7654e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_3.glsl
+++ /dev/null
@@ -1,30 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-varying highp vec3 vEllipseOffsets0_Stage0;
-varying highp vec2 vEllipseOffsets1_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 scaledOffset = vEllipseOffsets0_Stage0.xy;
- highp float test = dot(scaledOffset, scaledOffset) - 1.0;
- highp vec2 duvdx = dFdx(vEllipseOffsets0_Stage0.xy);
- highp vec2 duvdy = -dFdy(vEllipseOffsets0_Stage0.xy);
- highp vec2 grad = vec2(vEllipseOffsets0_Stage0.x * duvdx.x + vEllipseOffsets0_Stage0.y * duvdx.y, vEllipseOffsets0_Stage0.x * duvdy.x + vEllipseOffsets0_Stage0.y * duvdy.y);
- grad *= vEllipseOffsets0_Stage0.z;
- highp float grad_dot = 4.0 * dot(grad, grad);
- grad_dot = max(grad_dot, 6.103600026108325e-05);
- highp float invlen = inversesqrt(grad_dot);
- invlen *= vEllipseOffsets0_Stage0.z;
- highp float edgeAlpha = clamp(0.5 - test * invlen, 0.0, 1.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_scale_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_scale_texture.glsl
deleted file mode 100644
index 4676e51..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_ellipse_scale_texture.glsl
+++ /dev/null
@@ -1,45 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uellipse_Stage1;
-uniform highp vec2 uscale_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord) * outputColor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- highp vec2 d = sk_FragCoord.xy - uellipse_Stage1.xy;
- {
- d *= uscale_Stage1.y;
- }
- highp vec2 Z = d * uellipse_Stage1.zw;
- highp float implicit = dot(Z, d) - 1.0;
- highp float grad_dot = 4.0 * dot(Z, Z);
- {
- grad_dot = max(grad_dot, 6.103600026108325e-05);
- }
- highp float approx_dist = implicit * inversesqrt(grad_dot);
- {
- approx_dist *= uscale_Stage1.x;
- }
- mediump float alpha;
- {
- alpha = clamp(0.5 - approx_dist, 0.0, 1.0);
- }
- output_Stage1 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_hairquad.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_hairquad.glsl
deleted file mode 100644
index 73e225c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_hairquad.glsl
+++ /dev/null
@@ -1,25 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-varying mediump vec4 vHairQuadEdge_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vHairQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vHairQuadEdge_Stage0.xy);
- mediump vec2 gF = vec2((2.0 * vHairQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vHairQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vHairQuadEdge_Stage0.x * vHairQuadEdge_Stage0.x - vHairQuadEdge_Stage0.y;
- edgeAlpha = sqrt((edgeAlpha * edgeAlpha) / dot(gF, gF));
- edgeAlpha = max(1.0 - edgeAlpha, 0.0);
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad.glsl
deleted file mode 100644
index e6067b5..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad.glsl
+++ /dev/null
@@ -1,28 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad_rect.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad_rect.glsl
deleted file mode 100644
index 10468cb..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad_rect.glsl
+++ /dev/null
@@ -1,44 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 urectUniform_Stage1;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha;
- {
- mediump float xSub, ySub;
- xSub = min(sk_FragCoord.x - urectUniform_Stage1.x, 0.0);
- xSub += min(urectUniform_Stage1.z - sk_FragCoord.x, 0.0);
- ySub = min(sk_FragCoord.y - urectUniform_Stage1.y, 0.0);
- ySub += min(urectUniform_Stage1.w - sk_FragCoord.y, 0.0);
- alpha = (1.0 + max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));
- }
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad_texture.glsl
deleted file mode 100644
index 1a56c79..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_quad_texture.glsl
+++ /dev/null
@@ -1,41 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uDstTextureUpperLeft_Stage1;
-uniform mediump vec2 uDstTextureCoordScale_Stage1;
-uniform sampler2D uDstTextureSampler_Stage1;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump float edgeAlpha;
- mediump vec2 duvdx = dFdx(vQuadEdge_Stage0.xy);
- mediump vec2 duvdy = -dFdy(vQuadEdge_Stage0.xy);
- if (vQuadEdge_Stage0.z > 0.0 && vQuadEdge_Stage0.w > 0.0) {
- edgeAlpha = min(min(vQuadEdge_Stage0.z, vQuadEdge_Stage0.w) + 0.5, 1.0);
- } else {
- mediump vec2 gF = vec2((2.0 * vQuadEdge_Stage0.x) * duvdx.x - duvdx.y, (2.0 * vQuadEdge_Stage0.x) * duvdy.x - duvdy.y);
- edgeAlpha = vQuadEdge_Stage0.x * vQuadEdge_Stage0.x - vQuadEdge_Stage0.y;
- edgeAlpha = clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);
- }
- outputCoverage_Stage0 = vec4(edgeAlpha);
- }
- {
- if (all(lessThanEqual(outputCoverage_Stage0.xyz, vec3(0.0)))) {
- discard;
- }
- mediump vec2 _dstTexCoord = (sk_FragCoord.xy - uDstTextureUpperLeft_Stage1) * uDstTextureCoordScale_Stage1;
- _dstTexCoord.y = 1.0 - _dstTexCoord.y;
- mediump vec4 _dstColor = texture2D(uDstTextureSampler_Stage1, _dstTexCoord);
- gl_FragColor.w = outputColor_Stage0.w + (1.0 - outputColor_Stage0.w) * _dstColor.w;
- gl_FragColor.xyz = ((1.0 - outputColor_Stage0.w) * _dstColor.xyz + (1.0 - _dstColor.w) * outputColor_Stage0.xyz) + outputColor_Stage0.xyz * _dstColor.xyz;
- gl_FragColor = outputCoverage_Stage0 * gl_FragColor + (vec4(1.0) - outputCoverage_Stage0) * _dstColor;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_recdt_texindex_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_recdt_texindex_texturew.glsl
deleted file mode 100644
index 8088979..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_recdt_texindex_texturew.glsl
+++ /dev/null
@@ -1,39 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 urectUniform_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha;
- {
- mediump float xSub, ySub;
- xSub = min(sk_FragCoord.x - urectUniform_Stage1.x, 0.0);
- xSub += min(urectUniform_Stage1.z - sk_FragCoord.x, 0.0);
- ySub = min(sk_FragCoord.y - urectUniform_Stage1.y, 0.0);
- ySub += min(urectUniform_Stage1.w - sk_FragCoord.y, 0.0);
- alpha = (1.0 + max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));
- }
- output_Stage1 = outputCoverage_Stage0 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_rect.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_rect.glsl
deleted file mode 100644
index 0037504..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_rect.glsl
+++ /dev/null
@@ -1,30 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 urectUniform_Stage1;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha;
- {
- mediump float xSub, ySub;
- xSub = min(sk_FragCoord.x - urectUniform_Stage1.x, 0.0);
- xSub += min(urectUniform_Stage1.z - sk_FragCoord.x, 0.0);
- ySub = min(sk_FragCoord.y - urectUniform_Stage1.y, 0.0);
- ySub += min(urectUniform_Stage1.w - sk_FragCoord.y, 0.0);
- alpha = (1.0 + max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));
- }
- output_Stage1 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_rect_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_rect_texturew.glsl
deleted file mode 100644
index ef5da6c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_rect_texturew.glsl
+++ /dev/null
@@ -1,36 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 urectUniform_Stage2;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0).wwww;
- }
- mediump vec4 output_Stage2;
- {
- mediump float alpha;
- {
- mediump float xSub, ySub;
- xSub = min(sk_FragCoord.x - urectUniform_Stage2.x, 0.0);
- xSub += min(urectUniform_Stage2.z - sk_FragCoord.x, 0.0);
- ySub = min(sk_FragCoord.y - urectUniform_Stage2.y, 0.0);
- ySub += min(urectUniform_Stage2.w - sk_FragCoord.y, 0.0);
- alpha = (1.0 + max(xSub, -1.0)) * (1.0 + max(ySub, -1.0));
- }
- output_Stage2 = output_Stage1 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texdom_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texdom_texture.glsl
deleted file mode 100644
index 6706f14..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texdom_texture.glsl
+++ /dev/null
@@ -1,43 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uTexDom_Stage1_c0_c0;
-uniform mediump vec4 ucolor_Stage2_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- {
- highp vec2 origCoord = vTransformedCoords_0_Stage0;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1_c0_c0.xy, uTexDom_Stage1_c0_c0.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord) * _input;
- _sample1992 = inside;
- }
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- {
- child = ucolor_Stage2_c1_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = child * output_Stage1.w;
- }
- {
- gl_FragColor = output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texture.glsl
deleted file mode 100644
index 3029665..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texture.glsl
+++ /dev/null
@@ -1,22 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0);
- }
- outputColor_Stage0 = outputColor_Stage0 * texColor;
- }
- {
- gl_FragColor = outputColor_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew.glsl
deleted file mode 100644
index 32a6af2..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew.glsl
+++ /dev/null
@@ -1,30 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 uv = vTextureCoords_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, uv).wwww;
- }
- mediump float distance = 7.96875 * (texColor.x - 0.501960813999176);
- mediump float afwidth;
- afwidth = abs(0.6499999761581421 * -dFdy(vIntTextureCoords_Stage0.y));
- mediump float val = smoothstep(-afwidth, afwidth, distance);
- outputCoverage_Stage0 = vec4(val);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_2.glsl
deleted file mode 100644
index 7c4db08..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_2.glsl
+++ /dev/null
@@ -1,30 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 uv = vTextureCoords_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, uv).wwww;
- }
- mediump float distance = 7.96875 * (texColor.x - 0.501960813999176);
- mediump float afwidth;
- afwidth = abs(0.6499999761581421 * -dFdy(vIntTextureCoords_Stage0.y));
- mediump float val = smoothstep(-afwidth, afwidth, distance);
- outputCoverage_Stage0 = vec4(val);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_3.glsl
deleted file mode 100644
index 421494c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_3.glsl
+++ /dev/null
@@ -1,40 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 uv = vTextureCoords_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, uv).wwww;
- }
- mediump float distance = 7.96875 * (texColor.x - 0.501960813999176);
- mediump float afwidth;
- mediump vec2 dist_grad = vec2(dFdx(distance), -dFdy(distance));
- mediump float dg_len2 = dot(dist_grad, dist_grad);
- if (dg_len2 < 9.999999747378752e-05) {
- dist_grad = vec2(0.707099974155426, 0.707099974155426);
- } else {
- dist_grad = dist_grad * inversesqrt(dg_len2);
- }
- mediump vec2 Jdx = dFdx(vIntTextureCoords_Stage0);
- mediump vec2 Jdy = -dFdy(vIntTextureCoords_Stage0);
- mediump vec2 grad = vec2(dist_grad.x * Jdx.x + dist_grad.y * Jdy.x, dist_grad.x * Jdx.y + dist_grad.y * Jdy.y);
- afwidth = 0.6499999761581421 * length(grad);
- mediump float val = smoothstep(-afwidth, afwidth, distance);
- outputCoverage_Stage0 = vec4(val);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_4.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_4.glsl
deleted file mode 100644
index 663553b..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_4.glsl
+++ /dev/null
@@ -1,31 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 uv = vTextureCoords_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, uv).wwww;
- }
- mediump float distance = 7.96875 * (texColor.x - 0.501960813999176);
- mediump float afwidth;
- mediump float st_grad_len = length(-dFdy(vIntTextureCoords_Stage0));
- afwidth = abs(0.6499999761581421 * st_grad_len);
- mediump float val = smoothstep(-afwidth, afwidth, distance);
- outputCoverage_Stage0 = vec4(val);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_5.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_5.glsl
deleted file mode 100644
index 80cd4a3..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_5.glsl
+++ /dev/null
@@ -1,31 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 uv = vTextureCoords_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, uv).wwww;
- }
- mediump float distance = 7.96875 * (texColor.x - 0.501960813999176);
- mediump float afwidth;
- mediump float st_grad_len = length(-dFdy(vIntTextureCoords_Stage0));
- afwidth = abs(0.6499999761581421 * st_grad_len);
- mediump float val = smoothstep(-afwidth, afwidth, distance);
- outputCoverage_Stage0 = vec4(val);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_6.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_6.glsl
deleted file mode 100644
index 3e406d8..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_6.glsl
+++ /dev/null
@@ -1,36 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uDstTextureUpperLeft_Stage1;
-uniform mediump vec2 uDstTextureCoordScale_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-uniform sampler2D uDstTextureSampler_Stage1;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- {
- if (all(lessThanEqual(outputCoverage_Stage0.xyz, vec3(0.0)))) {
- discard;
- }
- mediump vec2 _dstTexCoord = (sk_FragCoord.xy - uDstTextureUpperLeft_Stage1) * uDstTextureCoordScale_Stage1;
- _dstTexCoord.y = 1.0 - _dstTexCoord.y;
- mediump vec4 _dstColor = texture2D(uDstTextureSampler_Stage1, _dstTexCoord);
- gl_FragColor.w = outputColor_Stage0.w + (1.0 - outputColor_Stage0.w) * _dstColor.w;
- gl_FragColor.xyz = ((1.0 - outputColor_Stage0.w) * _dstColor.xyz + (1.0 - _dstColor.w) * outputColor_Stage0.xyz) + outputColor_Stage0.xyz * _dstColor.xyz;
- gl_FragColor = outputCoverage_Stage0 * gl_FragColor + (vec4(1.0) - outputCoverage_Stage0) * _dstColor;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_7.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_7.glsl
deleted file mode 100644
index 165aeed..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texindex_texturew_7.glsl
+++ /dev/null
@@ -1,40 +0,0 @@
-#version 100
-
-#extension GL_OES_standard_derivatives : require
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- highp vec2 uv = vTextureCoords_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, uv).wwww;
- }
- mediump float distance = 7.96875 * (texColor.x - 0.501960813999176);
- mediump float afwidth;
- mediump vec2 dist_grad = vec2(dFdx(distance), -dFdy(distance));
- mediump float dg_len2 = dot(dist_grad, dist_grad);
- if (dg_len2 < 9.999999747378752e-05) {
- dist_grad = vec2(0.707099974155426, 0.707099974155426);
- } else {
- dist_grad = dist_grad * inversesqrt(dg_len2);
- }
- mediump vec2 Jdx = dFdx(vIntTextureCoords_Stage0);
- mediump vec2 Jdy = -dFdy(vIntTextureCoords_Stage0);
- mediump vec2 grad = vec2(dist_grad.x * Jdx.x + dist_grad.y * Jdy.x, dist_grad.x * Jdx.y + dist_grad.y * Jdy.y);
- afwidth = 0.6499999761581421 * length(grad);
- mediump float val = smoothstep(-afwidth, afwidth, distance);
- outputCoverage_Stage0 = vec4(val);
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture.glsl
deleted file mode 100644
index 1a58ba6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture.glsl
+++ /dev/null
@@ -1,19 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord) * outputColor_Stage0;
- }
- {
- gl_FragColor = outputColor_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_10.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_10.glsl
deleted file mode 100644
index 525fe12..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_10.glsl
+++ /dev/null
@@ -1,28 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump float luma = clamp(dot(vec3(0.2125999927520752, 0.7152000069618225, 0.0722000002861023), output_Stage1.xyz), 0.0, 1.0);
- output_Stage2 = vec4(0.0, 0.0, 0.0, luma);
- }
- {
- gl_FragColor = output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_11.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_11.glsl
deleted file mode 100644
index e8d7417..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_11.glsl
+++ /dev/null
@@ -1,70 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump float uweight_Stage2;
-uniform mediump mat4 um_Stage2_c1_c0_c0_c0;
-uniform mediump vec4 uv_Stage2_c1_c0_c0_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 out0_c1_c0;
- mediump vec4 inputColor = _input;
- {
- mediump float nonZeroAlpha = max(inputColor.w, 9.999999747378752e-05);
- inputColor = vec4(inputColor.xyz / nonZeroAlpha, nonZeroAlpha);
- }
- out0_c1_c0 = um_Stage2_c1_c0_c0_c0 * inputColor + uv_Stage2_c1_c0_c0_c0;
- {
- out0_c1_c0 = clamp(out0_c1_c0, 0.0, 1.0);
- }
- {
- out0_c1_c0.xyz *= out0_c1_c0.w;
- }
- return out0_c1_c0;
-}
-mediump vec4 stage_Stage2_c1_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1278;
- mediump float nonZeroAlpha = max(_input.w, 9.999999747378752e-05);
- mediump vec4 coord = vec4(_input.xyz / nonZeroAlpha, nonZeroAlpha);
- coord = coord * 0.9960939884185791 + vec4(0.0019529999699443579, 0.0019529999699443579, 0.0019529999699443579, 0.0019529999699443579);
- _sample1278.w = texture2D(uTextureSampler_0_Stage2, vec2(coord.w, 0.125)).w;
- _sample1278.x = texture2D(uTextureSampler_0_Stage2, vec2(coord.x, 0.375)).w;
- _sample1278.y = texture2D(uTextureSampler_0_Stage2, vec2(coord.y, 0.625)).w;
- _sample1278.z = texture2D(uTextureSampler_0_Stage2, vec2(coord.z, 0.875)).w;
- _sample1278.xyz *= _sample1278.w;
- return _sample1278;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1278;
- mediump vec4 out0_c1_c0;
- out0_c1_c0 = stage_Stage2_c1_c0_c0_c0(_input);
- _sample1278 = stage_Stage2_c1_c0_c1_c0(out0_c1_c0);
- return _sample1278;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 _sample1278;
- _sample1278 = stage_Stage2_c1_c0(output_Stage1);
- mediump vec4 in0 = _sample1278;
- mediump vec4 in1 = output_Stage1;
- output_Stage2 = mix(in0, in1, uweight_Stage2);
- }
- {
- gl_FragColor = output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_12.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_12.glsl
deleted file mode 100644
index 1db4bf2..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_12.glsl
+++ /dev/null
@@ -1,59 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample453_c0_c0_c0_c0;
- mediump float t = vTransformedCoords_0_Stage0.x + 9.999999747378752e-06;
- _sample453_c0_c0_c0_c0 = vec4(t, 1.0, 0.0, 0.0);
- return _sample453_c0_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c0_c0_c1_c0(mediump vec4 _input) {
- mediump vec4 _sample1464_c0_c0_c0_c0;
- mediump vec2 coord = vec2(_input.x, 0.5);
- _sample1464_c0_c0_c0_c0 = texture2D(uTextureSampler_0_Stage1, coord);
- return _sample1464_c0_c0_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0_c0_c0(mediump vec4 _input) {
- mediump vec4 child_c0_c0;
- mediump vec4 _sample453_c0_c0_c0_c0;
- _sample453_c0_c0_c0_c0 = stage_Stage1_c0_c0_c0_c0_c0_c0(vec4(1.0));
- mediump vec4 t = _sample453_c0_c0_c0_c0;
- {
- {
- t.x = fract(t.x);
- }
- mediump vec4 _sample1464_c0_c0_c0_c0;
- _sample1464_c0_c0_c0_c0 = stage_Stage1_c0_c0_c0_c0_c1_c0(t);
- child_c0_c0 = _sample1464_c0_c0_c0_c0;
- }
- {
- child_c0_c0.xyz *= child_c0_c0.w;
- }
- return child_c0_c0;
-}
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- mediump vec4 child_c0_c0;
- child_c0_c0 = stage_Stage1_c0_c0_c0_c0(vec4(1.0));
- child = child_c0_c0 * _input.w;
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = vec4(child.w);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_13.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_13.glsl
deleted file mode 100644
index bf7fde6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_13.glsl
+++ /dev/null
@@ -1,49 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage2;
-uniform mediump vec4 uTexDom_Stage2;
-uniform mediump vec3 uDecalParams_Stage2;
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uTextureSampler_0_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- child = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return child;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage2.xy + uscaleAndTranslate_Stage2.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage2.xy, uTexDom_Stage2.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage2, clampedCoord).wwww;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage2.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage2.y);
- if (err > uDecalParams_Stage2.z) {
- err = 1.0;
- } else if (uDecalParams_Stage2.z < 1.0) {
- err = 0.0;
- }
- output_Stage2 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = output_Stage1 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_14.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_14.glsl
deleted file mode 100644
index 936209f..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_14.glsl
+++ /dev/null
@@ -1,29 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uColor_Stage0;
-uniform mediump vec4 ucolor_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = uColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0);
- }
- outputColor_Stage0 = outputColor_Stage0 * texColor;
- }
- mediump vec4 output_Stage1;
- {
- {
- output_Stage1 = outputColor_Stage0 * ucolor_Stage1;
- }
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_2.glsl
deleted file mode 100644
index 3a1c53d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_2.glsl
+++ /dev/null
@@ -1,23 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_3.glsl
deleted file mode 100644
index d6aecb4..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_3.glsl
+++ /dev/null
@@ -1,27 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- child = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_4.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_4.glsl
deleted file mode 100644
index a1f6d16..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_4.glsl
+++ /dev/null
@@ -1,37 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 ucolor_Stage2_c1_c0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- {
- child = ucolor_Stage2_c1_c0;
- }
- return child;
-}
-void main() {
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = child * output_Stage1.w;
- }
- {
- gl_FragColor = output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_5.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_5.glsl
deleted file mode 100644
index f38301a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_5.glsl
+++ /dev/null
@@ -1,36 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uDstTextureUpperLeft_Stage2;
-uniform mediump vec2 uDstTextureCoordScale_Stage2;
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uDstTextureSampler_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- {
- if (all(lessThanEqual(vec4(1.0).xyz, vec3(0.0)))) {
- discard;
- }
- mediump vec2 _dstTexCoord = (sk_FragCoord.xy - uDstTextureUpperLeft_Stage2) * uDstTextureCoordScale_Stage2;
- _dstTexCoord.y = 1.0 - _dstTexCoord.y;
- mediump vec4 _dstColor = texture2D(uDstTextureSampler_Stage2, _dstTexCoord);
- gl_FragColor.w = output_Stage1.w + (1.0 - output_Stage1.w) * _dstColor.w;
- gl_FragColor.xyz = ((1.0 - output_Stage1.w) * _dstColor.xyz + (1.0 - _dstColor.w) * output_Stage1.xyz) + output_Stage1.xyz * _dstColor.xyz;
- gl_FragColor = gl_FragColor;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_6.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_6.glsl
deleted file mode 100644
index e8e73af..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_6.glsl
+++ /dev/null
@@ -1,27 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uDstTextureUpperLeft_Stage1;
-uniform mediump vec2 uDstTextureCoordScale_Stage1;
-uniform sampler2D uDstTextureSampler_Stage1;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- {
- if (all(lessThanEqual(vec4(1.0).xyz, vec3(0.0)))) {
- discard;
- }
- mediump vec2 _dstTexCoord = (sk_FragCoord.xy - uDstTextureUpperLeft_Stage1) * uDstTextureCoordScale_Stage1;
- _dstTexCoord.y = 1.0 - _dstTexCoord.y;
- mediump vec4 _dstColor = texture2D(uDstTextureSampler_Stage1, _dstTexCoord);
- gl_FragColor.w = outputColor_Stage0.w + (1.0 - outputColor_Stage0.w) * _dstColor.w;
- gl_FragColor.xyz = ((1.0 - outputColor_Stage0.w) * _dstColor.xyz + (1.0 - _dstColor.w) * outputColor_Stage0.xyz) + outputColor_Stage0.xyz * _dstColor.xyz;
- gl_FragColor = gl_FragColor;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_7.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_7.glsl
deleted file mode 100644
index 04ddcf3..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_7.glsl
+++ /dev/null
@@ -1,44 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump mat4 um_Stage2;
-uniform mediump vec4 uv_Stage2;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- child = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return child;
-}
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 inputColor = output_Stage1;
- {
- mediump float nonZeroAlpha = max(inputColor.w, 9.9999997473787516e-05);
- inputColor = vec4(inputColor.xyz / nonZeroAlpha, nonZeroAlpha);
- }
- output_Stage2 = um_Stage2 * inputColor + uv_Stage2;
- {
- output_Stage2 = clamp(output_Stage2, 0.0, 1.0);
- }
- {
- output_Stage2.xyz *= output_Stage2.w;
- }
- }
- {
- gl_FragColor = output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_8.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_8.glsl
deleted file mode 100644
index 9d4b5a9..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_8.glsl
+++ /dev/null
@@ -1,40 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uDstTextureUpperLeft_Stage2;
-uniform mediump vec2 uDstTextureCoordScale_Stage2;
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uDstTextureSampler_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 child;
- child = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return child;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- mediump vec4 child;
- child = stage_Stage1_c0_c0(vec4(1.0));
- output_Stage1 = child * outputColor_Stage0.w;
- }
- {
- if (all(lessThanEqual(vec4(1.0).xyz, vec3(0.0)))) {
- discard;
- }
- mediump vec2 _dstTexCoord = (sk_FragCoord.xy - uDstTextureUpperLeft_Stage2) * uDstTextureCoordScale_Stage2;
- _dstTexCoord.y = 1.0 - _dstTexCoord.y;
- mediump vec4 _dstColor = texture2D(uDstTextureSampler_Stage2, _dstTexCoord);
- gl_FragColor.w = output_Stage1.w + (1.0 - output_Stage1.w) * _dstColor.w;
- gl_FragColor.xyz = ((1.0 - output_Stage1.w) * _dstColor.xyz + (1.0 - _dstColor.w) * output_Stage1.xyz) + output_Stage1.xyz * _dstColor.xyz;
- gl_FragColor = gl_FragColor;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_9.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_9.glsl
deleted file mode 100644
index fb5030e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texture_9.glsl
+++ /dev/null
@@ -1,50 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 ucolor_Stage2_c1_c0;
-uniform mediump vec2 uDstTextureUpperLeft_Stage3;
-uniform mediump vec2 uDstTextureCoordScale_Stage3;
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uDstTextureSampler_Stage3;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-mediump vec4 stage_Stage1_c0_c0(mediump vec4 _input) {
- mediump vec4 _sample1992;
- _sample1992 = _input * texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- return _sample1992;
-}
-mediump vec4 stage_Stage2_c1_c0(mediump vec4 _input) {
- mediump vec4 child;
- {
- child = ucolor_Stage2_c1_c0;
- }
- return child;
-}
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 output_Stage1;
- {
- mediump vec4 _sample1992;
- _sample1992 = stage_Stage1_c0_c0(vec4(1.0, 1.0, 1.0, 1.0));
- output_Stage1 = _sample1992;
- }
- mediump vec4 output_Stage2;
- {
- mediump vec4 child;
- child = stage_Stage2_c1_c0(vec4(1.0));
- output_Stage2 = child * output_Stage1.w;
- }
- {
- if (all(lessThanEqual(vec4(1.0).xyz, vec3(0.0)))) {
- discard;
- }
- mediump vec2 _dstTexCoord = (sk_FragCoord.xy - uDstTextureUpperLeft_Stage3) * uDstTextureCoordScale_Stage3;
- _dstTexCoord.y = 1.0 - _dstTexCoord.y;
- mediump vec4 _dstColor = texture2D(uDstTextureSampler_Stage3, _dstTexCoord);
- gl_FragColor.w = output_Stage2.w + (1.0 - output_Stage2.w) * _dstColor.w;
- gl_FragColor.xyz = ((1.0 - output_Stage2.w) * _dstColor.xyz + (1.0 - _dstColor.w) * output_Stage2.xyz) + output_Stage2.xyz * _dstColor.xyz;
- gl_FragColor = gl_FragColor;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew.glsl
deleted file mode 100644
index 351c554..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew.glsl
+++ /dev/null
@@ -1,23 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- mediump vec4 outputCoverage_Stage0;
- {
- outputColor_Stage0 = vinColor_Stage0;
- mediump vec4 texColor;
- {
- texColor = texture2D(uTextureSampler_0_Stage0, vTextureCoords_Stage0).wwww;
- }
- outputCoverage_Stage0 = texColor;
- }
- {
- gl_FragColor = outputColor_Stage0 * outputCoverage_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_2.glsl
deleted file mode 100644
index 86c39c7..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_2.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0).wwww;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_3.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_3.glsl
deleted file mode 100644
index e64d19e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_3.glsl
+++ /dev/null
@@ -1,16 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0).wwww;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_4.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_4.glsl
deleted file mode 100644
index 08515a2..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_color_texturew_4.glsl
+++ /dev/null
@@ -1,21 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec3 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- highp vec2 vTransformedCoords_0_Stage0_ensure2D = vTransformedCoords_0_Stage0.xy / vTransformedCoords_0_Stage0.z;
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0_ensure2D).wwww;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_coverage_texdom_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_coverage_texdom_texture.glsl
deleted file mode 100644
index 1ad3b80..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_coverage_texdom_texture.glsl
+++ /dev/null
@@ -1,39 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec2 uDstTextureUpperLeft_Stage2;
-uniform mediump vec2 uDstTextureCoordScale_Stage2;
-uniform sampler2D uTextureSampler_0_Stage1;
-uniform sampler2D uDstTextureSampler_Stage2;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp float vcoverage_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputCoverage_Stage0;
- {
- highp float coverage = vcoverage_Stage0;
- outputCoverage_Stage0 = vec4(coverage);
- }
- mediump vec4 output_Stage1;
- {
- {
- highp vec2 origCoord = vTransformedCoords_0_Stage0;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord);
- output_Stage1 = inside;
- }
- }
- {
- if (all(lessThanEqual(outputCoverage_Stage0.xyz, vec3(0.0)))) {
- discard;
- }
- mediump vec2 _dstTexCoord = (sk_FragCoord.xy - uDstTextureUpperLeft_Stage2) * uDstTextureCoordScale_Stage2;
- _dstTexCoord.y = 1.0 - _dstTexCoord.y;
- mediump vec4 _dstColor = texture2D(uDstTextureSampler_Stage2, _dstTexCoord);
- gl_FragColor = output_Stage1;
- gl_FragColor = outputCoverage_Stage0 * gl_FragColor + (vec4(1.0) - outputCoverage_Stage0) * _dstColor;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_decal_scaletranslate_texdom_textures.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_decal_scaletranslate_texdom_textures.glsl
deleted file mode 100644
index c488cf6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_decal_scaletranslate_texdom_textures.glsl
+++ /dev/null
@@ -1,40 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uscaleAndTranslate_Stage1;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vec4(1.0);
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord);
- }
- mediump vec4 output_Stage1;
- {
- mediump vec2 coords = sk_FragCoord.xy * uscaleAndTranslate_Stage1.xy + uscaleAndTranslate_Stage1.zw;
- {
- highp vec2 origCoord = coords;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord).wwww;
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_decal_texdom_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_decal_texdom_texture.glsl
deleted file mode 100644
index 466bb98..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_decal_texdom_texture.glsl
+++ /dev/null
@@ -1,28 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uTexDom_Stage1;
-uniform mediump vec3 uDecalParams_Stage1;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- {
- highp vec2 origCoord = vTransformedCoords_0_Stage0;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord);
- mediump float err = max(abs(clampedCoord.x - origCoord.x) * uDecalParams_Stage1.x, abs(clampedCoord.y - origCoord.y) * uDecalParams_Stage1.y);
- if (err > uDecalParams_Stage1.z) {
- err = 1.0;
- } else if (uDecalParams_Stage1.z < 1.0) {
- err = 0.0;
- }
- output_Stage1 = mix(inside, vec4(0.0, 0.0, 0.0, 0.0), err);
- }
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_edges_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_edges_texture.glsl
deleted file mode 100644
index 4cc0745..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_edges_texture.glsl
+++ /dev/null
@@ -1,39 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage1[4];
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vec4(1.0);
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord);
- }
- mediump vec4 output_Stage1;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage1[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = edge >= 0.5 ? 1.0 : 0.0;
- alpha *= edge;
- edge = dot(uedges_Stage1[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = edge >= 0.5 ? 1.0 : 0.0;
- alpha *= edge;
- edge = dot(uedges_Stage1[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = edge >= 0.5 ? 1.0 : 0.0;
- alpha *= edge;
- edge = dot(uedges_Stage1[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = edge >= 0.5 ? 1.0 : 0.0;
- alpha *= edge;
- output_Stage1 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_ellipse_scale_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_ellipse_scale_texture.glsl
deleted file mode 100644
index d283f7d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_ellipse_scale_texture.glsl
+++ /dev/null
@@ -1,44 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uellipse_Stage1;
-uniform highp vec2 uscale_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vec4(1.0);
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord);
- }
- mediump vec4 output_Stage1;
- {
- highp vec2 d = sk_FragCoord.xy - uellipse_Stage1.xy;
- {
- d *= uscale_Stage1.y;
- }
- highp vec2 Z = d * uellipse_Stage1.zw;
- highp float implicit = dot(Z, d) - 1.0;
- highp float grad_dot = 4.0 * dot(Z, Z);
- {
- grad_dot = max(grad_dot, 6.1036000261083245e-05);
- }
- highp float approx_dist = implicit * inversesqrt(grad_dot);
- {
- approx_dist *= uscale_Stage1.x;
- }
- mediump float alpha;
- {
- alpha = clamp(0.5 - approx_dist, 0.0, 1.0);
- }
- output_Stage1 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_increment_kernel_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_increment_kernel_texture.glsl
deleted file mode 100644
index c345bb9..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_increment_kernel_texture.glsl
+++ /dev/null
@@ -1,94 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec2 uImageIncrement_Stage1;
-uniform mediump vec4 uKernel_Stage1[7];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- output_Stage1 = vec4(0.0, 0.0, 0.0, 0.0);
- highp vec2 coord = vTransformedCoords_0_Stage0 - 12.0 * uImageIncrement_Stage1;
- highp vec2 coordSampled = vec2(0.0, 0.0);
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[0].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[1].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[2].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[3].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[4].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].x;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].y;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].z;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[5].w;
- coord += uImageIncrement_Stage1;
- coordSampled = coord;
- output_Stage1 += texture2D(uTextureSampler_0_Stage1, coordSampled) * uKernel_Stage1[6].x;
- coord += uImageIncrement_Stage1;
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_radii_rect_scale_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_radii_rect_scale_texture.glsl
deleted file mode 100644
index 9999c6d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_radii_rect_scale_texture.glsl
+++ /dev/null
@@ -1,38 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 uinnerRect_Stage1;
-uniform mediump vec2 uscale_Stage1;
-uniform highp vec2 uinvRadiiXY_Stage1;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vec4(1.0);
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord);
- }
- mediump vec4 output_Stage1;
- {
- highp vec2 dxy0 = uinnerRect_Stage1.xy - sk_FragCoord.xy;
- highp vec2 dxy1 = sk_FragCoord.xy - uinnerRect_Stage1.zw;
- highp vec2 dxy = max(max(dxy0, dxy1), 0.0);
- dxy *= uscale_Stage1.y;
- highp vec2 Z = dxy * uinvRadiiXY_Stage1;
- mediump float implicit = dot(Z, dxy) - 1.0;
- mediump float grad_dot = 4.0 * dot(Z, Z);
- grad_dot = max(grad_dot, 9.9999997473787516e-05);
- mediump float approx_dist = implicit * inversesqrt(grad_dot);
- approx_dist *= uscale_Stage1.x;
- mediump float alpha = clamp(0.5 - approx_dist, 0.0, 1.0);
- output_Stage1 = vec4(alpha);
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texdom_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texdom_texture.glsl
deleted file mode 100644
index fefc5ce..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texdom_texture.glsl
+++ /dev/null
@@ -1,21 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec4 uTexDom_Stage1;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- {
- highp vec2 origCoord = vTransformedCoords_0_Stage0;
- highp vec2 clampedCoord = clamp(origCoord, uTexDom_Stage1.xy, uTexDom_Stage1.zw);
- mediump vec4 inside = texture2D(uTextureSampler_0_Stage1, clampedCoord);
- output_Stage1 = inside;
- }
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture.glsl
deleted file mode 100644
index deb8281..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture.glsl
+++ /dev/null
@@ -1,18 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vec4(1.0);
- highp vec2 texCoord;
- texCoord = vlocalCoord_Stage0;
- outputColor_Stage0 = texture2D(uTextureSampler_0_Stage0, texCoord);
- }
- {
- gl_FragColor = outputColor_Stage0;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_2.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_2.glsl
deleted file mode 100644
index 24306c3..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_2.glsl
+++ /dev/null
@@ -1,15 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 output_Stage1;
- {
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0);
- }
- {
- gl_FragColor = output_Stage1;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_color_edgesw.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_color_edgesw.glsl
deleted file mode 100644
index 9a7817f..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_color_edgesw.glsl
+++ /dev/null
@@ -1,41 +0,0 @@
-#version 100
-
-uniform highp float u_skRTHeight;
-precision mediump float;
-precision mediump sampler2D;
-uniform mediump vec3 uedges_Stage2[4];
-uniform sampler2D uTextureSampler_0_Stage1;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
-highp vec4 sk_FragCoord = vec4(gl_FragCoord.x, u_skRTHeight - gl_FragCoord.y, gl_FragCoord.z, gl_FragCoord.w);
- mediump vec4 outputColor_Stage0;
- {
- outputColor_Stage0 = vcolor_Stage0;
- }
- mediump vec4 output_Stage1;
- {
- output_Stage1 = texture2D(uTextureSampler_0_Stage1, vTransformedCoords_0_Stage0).wwww;
- }
- mediump vec4 output_Stage2;
- {
- mediump float alpha = 1.0;
- mediump float edge;
- edge = dot(uedges_Stage2[0], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[1], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[2], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- edge = dot(uedges_Stage2[3], vec3(sk_FragCoord.x, sk_FragCoord.y, 1.0));
- edge = clamp(edge, 0.0, 1.0);
- alpha *= edge;
- output_Stage2 = output_Stage1 * alpha;
- }
- {
- gl_FragColor = outputColor_Stage0 * output_Stage2;
- }
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_rgba.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_rgba.glsl
deleted file mode 100644
index fdffec9..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_rgba.glsl
+++ /dev/null
@@ -1,9 +0,0 @@
-precision mediump float;
-varying vec2 v_tex_coord_rgba;
-uniform sampler2D texture_rgba;
-
-void main() {
- vec4 untransformed_color = vec4(texture2D(texture_rgba, v_tex_coord_rgba).rgba);
- vec4 color = untransformed_color;
- gl_FragColor = color;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_uyvy_1plane.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_uyvy_1plane.glsl
deleted file mode 100644
index 9a77757..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_uyvy_1plane.glsl
+++ /dev/null
@@ -1,34 +0,0 @@
-precision mediump float;
-varying vec2 v_tex_coord_uyvy;
-uniform sampler2D texture_uyvy;
-uniform mat4 to_rgb_color_matrix;
-uniform ivec2 texture_size_uyvy;
-
-void main() {
- float texture_space_x = float(texture_size_uyvy.x) * v_tex_coord_uyvy.x;
- texture_space_x = clamp(
- texture_space_x, 0.25, float(texture_size_uyvy.x) - 0.25);
-
- float texel_step_u = 1.0 / float(texture_size_uyvy.x);
- float sample_1_texture_space = floor(texture_space_x - 0.5) + 0.5;
- float sample_1_normalized = sample_1_texture_space * texel_step_u;
- float sample_2_normalized = sample_1_normalized + texel_step_u;
- vec4 sample_1 =
- texture2D(texture_uyvy, vec2(sample_1_normalized, v_tex_coord_uyvy.y));
- vec4 sample_2 =
- texture2D(texture_uyvy, vec2(sample_2_normalized, v_tex_coord_uyvy.y));
- float lerp_progress = texture_space_x - sample_1_texture_space;
- vec2 uv_value = mix(sample_1.rb, sample_2.rb, lerp_progress);
-
- float y_value;
- if (lerp_progress < 0.25) {
- y_value = mix(sample_1.g, sample_1.a, lerp_progress * 2.0 + 0.5);
- } else if (lerp_progress < 0.75) {
- y_value = mix(sample_1.a, sample_2.g, lerp_progress * 2.0 - 0.5);
- } else {
- y_value = mix(sample_2.g, sample_2.a, lerp_progress * 2.0 - 1.5);
- }
-
- vec4 untransformed_color = vec4(y_value, uv_value.r, uv_value.g, 1.0);
- gl_FragColor = untransformed_color * to_rgb_color_matrix;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_yuv_2plane.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_yuv_2plane.glsl
deleted file mode 100644
index 15c79fa..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_yuv_2plane.glsl
+++ /dev/null
@@ -1,15 +0,0 @@
-precision mediump float;
-varying vec2 v_tex_coord_y;
-varying vec2 v_tex_coord_uv;
-uniform sampler2D texture_y;
-uniform sampler2D texture_uv;
-uniform mat4 to_rgb_color_matrix;
-
-void main() {
- vec4 untransformed_color = vec4(
- texture2D(texture_y, v_tex_coord_y).a,
- texture2D(texture_uv, v_tex_coord_uv).ba, 1.0);
-
- vec4 color = untransformed_color * to_rgb_color_matrix;
- gl_FragColor = color;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_yuv_3plane.glsl b/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_yuv_3plane.glsl
deleted file mode 100644
index da6e748..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/fragment_textured_vbo_yuv_3plane.glsl
+++ /dev/null
@@ -1,16 +0,0 @@
-precision mediump float;
-varying vec2 v_tex_coord_y;
-varying vec2 v_tex_coord_u;
-varying vec2 v_tex_coord_v;
-uniform sampler2D texture_y;
-uniform sampler2D texture_u;
-uniform sampler2D texture_v;
-uniform mat4 to_rgb_color_matrix;
-void main() {
- vec4 untransformed_color = vec4(
- texture2D(texture_y, v_tex_coord_y).a,
- texture2D(texture_u, v_tex_coord_u).a,
- texture2D(texture_v, v_tex_coord_v).a, 1.0);
- vec4 color = untransformed_color * to_rgb_color_matrix;
- gl_FragColor = color;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color.glsl
deleted file mode 100644
index 70f321c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color.glsl
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec4 a_color;
-varying vec4 v_color;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_color = a_color;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color_offset.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color_offset.glsl
deleted file mode 100644
index aeaf08c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color_offset.glsl
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec4 a_color;
-attribute vec2 a_offset;
-varying vec4 v_color;
-varying vec2 v_offset;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_color = a_color;
- v_offset = a_offset;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color_texcoord.glsl
deleted file mode 100644
index 04fbdfa..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_color_texcoord.glsl
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec4 a_color;
-attribute vec2 a_texcoord;
-varying vec4 v_color;
-varying vec2 v_texcoord;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_color = a_color;
- v_texcoord = a_texcoord;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_offset.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_offset.glsl
deleted file mode 100644
index 39e31e9..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_offset.glsl
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec2 a_offset;
-varying vec2 v_offset;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_offset = a_offset;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_offset_rcorner.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_offset_rcorner.glsl
deleted file mode 100644
index d450110..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_offset_rcorner.glsl
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec2 a_offset;
-attribute vec4 a_rcorner;
-varying vec2 v_offset;
-varying vec4 v_rcorner;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_offset = a_offset;
- v_rcorner = a_rcorner;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner.glsl
deleted file mode 100644
index 03b53c6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner.glsl
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec4 a_rcorner;
-varying vec4 v_rcorner;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_rcorner = a_rcorner;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner2.glsl
deleted file mode 100644
index 9c67007..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner2.glsl
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec4 a_rcorner_inner;
-attribute vec4 a_rcorner_outer;
-varying vec4 v_rcorner_inner;
-varying vec4 v_rcorner_outer;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_rcorner_inner = a_rcorner_inner;
- v_rcorner_outer = a_rcorner_outer;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner_texcoord.glsl
deleted file mode 100644
index d7a0045..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_rcorner_texcoord.glsl
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec4 a_rcorner;
-attribute vec2 a_texcoord;
-varying vec4 v_rcorner;
-varying vec2 v_texcoord;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_rcorner = a_rcorner;
- v_texcoord = a_texcoord;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_texcoord.glsl
deleted file mode 100644
index d83c99e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_directgles_texcoord.glsl
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2020 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec2 a_texcoord;
-varying vec2 v_texcoord;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_texcoord = a_texcoord;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_mesh.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_mesh.glsl
deleted file mode 100644
index 81611c6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_mesh.glsl
+++ /dev/null
@@ -1,10 +0,0 @@
-attribute vec3 a_position;
-attribute vec2 a_tex_coord;
-varying vec2 v_tex_coord;
-uniform mat3 u_tex_transform;
-uniform mat4 u_mvp_matrix;
-void main() {
- gl_Position = u_mvp_matrix * vec4(a_position.xyz, 1.0);
- vec3 tex_coord = u_tex_transform * vec3(a_tex_coord, 1.0);
- v_tex_coord = tex_coord.xy;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_position_and_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_position_and_texcoord.glsl
deleted file mode 100644
index 57b4fa7..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_position_and_texcoord.glsl
+++ /dev/null
@@ -1,8 +0,0 @@
-attribute vec2 a_position;
-attribute vec2 a_tex_coord;
-varying vec2 v_tex_coord;
-
-void main() {
- gl_Position = vec4(a_position.x, a_position.y, 0, 1);
- v_tex_coord = a_tex_coord;
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_circle_color_position.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_circle_color_position.glsl
deleted file mode 100644
index 78f8b78..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_circle_color_position.glsl
+++ /dev/null
@@ -1,17 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec4 inCircleEdge;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- vinCircleEdge_Stage0 = inCircleEdge;
- vinColor_Stage0 = inColor;
- highp vec2 pos2 = inPosition;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
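This and the following deleted Skia-generated vertex shaders all end with the same `sk_RTAdjust` step. Written out (an interpretation of the shader source), with $\mathbf{r}$ = `sk_RTAdjust`:

$$
x' = x\,r_x + w\,r_y, \qquad y' = y\,r_z + w\,r_w, \qquad z' = 0, \qquad w' = w,
$$

i.e. after the divide by $w$, an affine remap $x/w \mapsto (x/w)\,r_x + r_y$ (and likewise for $y$), which appears to be how Skia maps render-target coordinates into normalized device coordinates, including any y-axis flip.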
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_circle_color_position_texcoord_transform.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_circle_color_position_texcoord_transform.glsl
deleted file mode 100644
index dd13547..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_circle_color_position_texcoord_transform.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec4 inCircleEdge;
-varying highp vec4 vinCircleEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- vinCircleEdge_Stage0 = inCircleEdge;
- vinColor_Stage0 = inColor;
- highp vec2 pos2 = inPosition;
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(inPosition, 1.0)).xy;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_geomdomain.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_geomdomain.glsl
deleted file mode 100644
index 42fa747..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_geomdomain.glsl
+++ /dev/null
@@ -1,19 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec4 positionWithCoverage;
-attribute mediump vec4 color;
-attribute highp vec4 geomDomain;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-varying highp vec4 vgeomDomain_Stage0;
-void main() {
- highp vec3 position = positionWithCoverage.xyz;
- vcolor_Stage0 = color;
- vcoverage_Stage0 = positionWithCoverage.w * positionWithCoverage.z;
- vgeomDomain_Stage0 = geomDomain;
- gl_Position = vec4(position.x, position.y, 0.0, position.z);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_geomdomain_position.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_geomdomain_position.glsl
deleted file mode 100644
index f0524bb..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_geomdomain_position.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 position;
-attribute highp float coverage;
-attribute mediump vec4 color;
-attribute highp vec4 geomDomain;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-varying highp vec4 vgeomDomain_Stage0;
-void main() {
- highp vec2 position = position;
- vcolor_Stage0 = color;
- vcoverage_Stage0 = coverage;
- vgeomDomain_Stage0 = geomDomain;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position.glsl
deleted file mode 100644
index bfd2202..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position.glsl
+++ /dev/null
@@ -1,17 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform mediump vec4 uColor_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump float inCoverage;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- mediump vec4 color = uColor_Stage0;
- color = color * inCoverage;
- vcolor_Stage0 = color;
- highp vec2 pos2 = inPosition;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_2.glsl
deleted file mode 100644
index 563fa5c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_2.glsl
+++ /dev/null
@@ -1,17 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute mediump float inCoverage;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- mediump vec4 color = inColor;
- color = color * inCoverage;
- vcolor_Stage0 = color;
- highp vec2 pos2 = inPosition;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_texcoord_transform.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_texcoord_transform.glsl
deleted file mode 100644
index 7bd02d0..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_texcoord_transform.glsl
+++ /dev/null
@@ -1,21 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 position;
-attribute highp float coverage;
-attribute mediump vec4 color;
-attribute highp vec2 localCoord;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-varying highp float vcoverage_Stage0;
-void main() {
- highp vec2 position = position;
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(localCoord, 1.0)).xy;
- vcolor_Stage0 = color;
- vcoverage_Stage0 = coverage;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_texcoord_transform_2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_texcoord_transform_2.glsl
deleted file mode 100644
index ebb9b3e..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_coverage_position_texcoord_transform_2.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform mediump vec4 uColor_Stage0;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump float inCoverage;
-varying mediump vec4 vcolor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- mediump vec4 color = uColor_Stage0;
- color = color * inCoverage;
- vcolor_Stage0 = color;
- highp vec2 pos2 = inPosition;
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(inPosition, 1.0)).xy;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position.glsl
deleted file mode 100644
index bf8aa7d..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec3 inEllipseOffset;
-attribute highp vec4 inEllipseRadii;
-varying highp vec3 vEllipseOffsets_Stage0;
-varying highp vec4 vEllipseRadii_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- vEllipseOffsets_Stage0 = inEllipseOffset;
- vEllipseRadii_Stage0 = inEllipseRadii;
- vinColor_Stage0 = inColor;
- highp vec2 pos2 = inPosition;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_2.glsl
deleted file mode 100644
index d31eeb5..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_2.glsl
+++ /dev/null
@@ -1,21 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uViewM_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec3 inEllipseOffsets0;
-attribute highp vec2 inEllipseOffsets1;
-varying highp vec3 vEllipseOffsets0_Stage0;
-varying highp vec2 vEllipseOffsets1_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- vEllipseOffsets0_Stage0 = inEllipseOffsets0;
- vEllipseOffsets1_Stage0 = inEllipseOffsets1;
- vinColor_Stage0 = inColor;
- highp vec2 pos2 = (uViewM_Stage0 * vec3(inPosition, 1.0)).xy;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_3.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_3.glsl
deleted file mode 100644
index f59a37c..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_3.glsl
+++ /dev/null
@@ -1,21 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uViewM_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec3 inEllipseOffsets0;
-attribute highp vec2 inEllipseOffsets1;
-varying highp vec3 vEllipseOffsets0_Stage0;
-varying highp vec2 vEllipseOffsets1_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- vEllipseOffsets0_Stage0 = inEllipseOffsets0;
- vEllipseOffsets1_Stage0 = inEllipseOffsets1;
- vinColor_Stage0 = inColor;
- highp vec3 pos3 = uViewM_Stage0 * vec3(inPosition, 1.0);
- gl_Position = vec4(pos3.x, pos3.y, 0.0, pos3.z);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_texcoord_transform.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_texcoord_transform.glsl
deleted file mode 100644
index 3ae93c0..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_ellipse_position_texcoord_transform.glsl
+++ /dev/null
@@ -1,23 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec3 inEllipseOffset;
-attribute highp vec4 inEllipseRadii;
-varying highp vec3 vEllipseOffsets_Stage0;
-varying highp vec4 vEllipseRadii_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- vEllipseOffsets_Stage0 = inEllipseOffset;
- vEllipseRadii_Stage0 = inEllipseRadii;
- vinColor_Stage0 = inColor;
- highp vec2 pos2 = inPosition;
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(inPosition, 1.0)).xy;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position.glsl
deleted file mode 100644
index c68b57a..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position.glsl
+++ /dev/null
@@ -1,13 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 position;
-attribute mediump vec4 color;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- vcolor_Stage0 = color;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_2.glsl
deleted file mode 100644
index 2f4b372..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_2.glsl
+++ /dev/null
@@ -1,15 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- mediump vec4 color = inColor;
- vcolor_Stage0 = color;
- highp vec2 pos2 = inPosition;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_quad.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_quad.glsl
deleted file mode 100644
index e4b5524..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_quad.glsl
+++ /dev/null
@@ -1,17 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute mediump vec4 inQuadEdge;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- vQuadEdge_Stage0 = inQuadEdge;
- vinColor_Stage0 = inColor;
- highp vec2 pos2 = inPosition;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_quad_texcoord_transform.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_quad_texcoord_transform.glsl
deleted file mode 100644
index fd968cb..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_quad_texcoord_transform.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute mediump vec4 inQuadEdge;
-varying mediump vec4 vQuadEdge_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- vQuadEdge_Stage0 = inQuadEdge;
- vinColor_Stage0 = inColor;
- highp vec2 pos2 = inPosition;
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(inPosition, 1.0)).xy;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord.glsl
deleted file mode 100644
index c15bd44..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord.glsl
+++ /dev/null
@@ -1,25 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp vec2 uAtlasSizeInv_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec2 inTextureCoords;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-void main() {
- vinColor_Stage0 = inColor;
- highp vec2 indexTexCoords = vec2(inTextureCoords.x, inTextureCoords.y);
- highp vec2 unormTexCoords = floor(0.5 * indexTexCoords);
- highp vec2 diff = indexTexCoords - 2.0 * unormTexCoords;
- highp float texIdx = 2.0 * diff.x + diff.y;
- vTextureCoords_Stage0 = unormTexCoords * uAtlasSizeInv_Stage0;
- vTexIndex_Stage0 = texIdx;
- vIntTextureCoords_Stage0 = unormTexCoords;
- gl_Position = vec4(inPosition.x, inPosition.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_2.glsl
deleted file mode 100644
index 61a2b23..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_2.glsl
+++ /dev/null
@@ -1,23 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp vec2 uAtlasSizeInv_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec2 inTextureCoords;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- highp vec2 indexTexCoords = vec2(inTextureCoords.x, inTextureCoords.y);
- highp vec2 unormTexCoords = floor(0.5 * indexTexCoords);
- highp vec2 diff = indexTexCoords - 2.0 * unormTexCoords;
- highp float texIdx = 2.0 * diff.x + diff.y;
- vTextureCoords_Stage0 = unormTexCoords * uAtlasSizeInv_Stage0;
- vTexIndex_Stage0 = texIdx;
- vinColor_Stage0 = inColor;
- gl_Position = vec4(inPosition.x, inPosition.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_3.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_3.glsl
deleted file mode 100644
index 619a5db..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_3.glsl
+++ /dev/null
@@ -1,17 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 position;
-attribute mediump vec4 color;
-attribute highp vec2 localCoord;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(localCoord, 1.0)).xy;
- vcolor_Stage0 = color;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_4.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_4.glsl
deleted file mode 100644
index 39d3803..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_4.glsl
+++ /dev/null
@@ -1,16 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 position;
-attribute mediump vec4 color;
-attribute highp vec2 localCoord;
-varying mediump vec4 vcolor_Stage0;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
- vcolor_Stage0 = color;
- vlocalCoord_Stage0 = localCoord;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_5.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_5.glsl
deleted file mode 100644
index aa505f6..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_5.glsl
+++ /dev/null
@@ -1,26 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp vec2 uAtlasSizeInv_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec2 inTextureCoords;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- highp vec2 indexTexCoords = vec2(inTextureCoords.x, inTextureCoords.y);
- highp vec2 unormTexCoords = floor(0.5 * indexTexCoords);
- highp vec2 diff = indexTexCoords - 2.0 * unormTexCoords;
- highp float texIdx = 2.0 * diff.x + diff.y;
- vTextureCoords_Stage0 = unormTexCoords * uAtlasSizeInv_Stage0;
- vTexIndex_Stage0 = texIdx;
- vIntTextureCoords_Stage0 = unormTexCoords;
- vinColor_Stage0 = inColor;
- highp vec2 pos2 = inPosition;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_texindex.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_texindex.glsl
deleted file mode 100644
index 15c7252..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_texindex.glsl
+++ /dev/null
@@ -1,27 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp vec2 uAtlasSizeInv_Stage0;
-uniform highp mat3 uViewM_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec2 inTextureCoords;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying highp vec2 vIntTextureCoords_Stage0;
-varying mediump vec4 vinColor_Stage0;
-void main() {
- highp vec2 indexTexCoords = vec2(inTextureCoords.x, inTextureCoords.y);
- highp vec2 unormTexCoords = floor(0.5 * indexTexCoords);
- highp vec2 diff = indexTexCoords - 2.0 * unormTexCoords;
- highp float texIdx = 2.0 * diff.x + diff.y;
- vTextureCoords_Stage0 = unormTexCoords * uAtlasSizeInv_Stage0;
- vTexIndex_Stage0 = texIdx;
- vIntTextureCoords_Stage0 = unormTexCoords;
- vinColor_Stage0 = inColor;
- highp vec3 pos3 = uViewM_Stage0 * vec3(inPosition, 1.0);
- gl_Position = vec4(pos3.x, pos3.y, 0.0, pos3.z);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_texindex_transform.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_texindex_transform.glsl
deleted file mode 100644
index 82387e0..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_texindex_transform.glsl
+++ /dev/null
@@ -1,26 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp vec2 uAtlasSizeInv_Stage0;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inColor;
-attribute highp vec2 inTextureCoords;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-varying mediump vec4 vinColor_Stage0;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- highp vec2 indexTexCoords = vec2(inTextureCoords.x, inTextureCoords.y);
- highp vec2 unormTexCoords = floor(0.5 * indexTexCoords);
- highp vec2 diff = indexTexCoords - 2.0 * unormTexCoords;
- highp float texIdx = 2.0 * diff.x + diff.y;
- vTextureCoords_Stage0 = unormTexCoords * uAtlasSizeInv_Stage0;
- vTexIndex_Stage0 = texIdx;
- vinColor_Stage0 = inColor;
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(inPosition, 1.0)).xy;
- gl_Position = vec4(inPosition.x, inPosition.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_transform.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_transform.glsl
deleted file mode 100644
index f5358bf..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_transform.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-uniform highp mat3 uCoordTransformMatrix_1_Stage0;
-attribute highp vec2 position;
-attribute mediump vec4 color;
-attribute highp vec2 localCoord;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp vec2 vTransformedCoords_1_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(localCoord, 1.0)).xy;
- vTransformedCoords_1_Stage0 = (uCoordTransformMatrix_1_Stage0 * vec3(localCoord, 1.0)).xy;
- vcolor_Stage0 = color;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_transform_2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_transform_2.glsl
deleted file mode 100644
index 0684998..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_color_position_texcoord_transform_2.glsl
+++ /dev/null
@@ -1,17 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 position;
-attribute mediump vec4 color;
-attribute highp vec3 localCoord;
-varying highp vec3 vTransformedCoords_0_Stage0;
-varying mediump vec4 vcolor_Stage0;
-void main() {
- vTransformedCoords_0_Stage0 = uCoordTransformMatrix_0_Stage0 * localCoord;
- vcolor_Stage0 = color;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position.glsl
deleted file mode 100644
index 7124ac4..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position.glsl
+++ /dev/null
@@ -1,14 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 inPosition;
-attribute mediump float inCoverage;
-varying mediump float vinCoverage_Stage0;
-void main() {
- highp vec2 pos2 = inPosition;
- vinCoverage_Stage0 = inCoverage;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position_texcoord_transform.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position_texcoord_transform.glsl
deleted file mode 100644
index db58f8f..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position_texcoord_transform.glsl
+++ /dev/null
@@ -1,17 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 inPosition;
-attribute mediump float inCoverage;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying mediump float vinCoverage_Stage0;
-void main() {
- highp vec2 pos2 = inPosition;
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(inPosition, 1.0)).xy;
- vinCoverage_Stage0 = inCoverage;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position_texcoord_transform_2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position_texcoord_transform_2.glsl
deleted file mode 100644
index 2881e74..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_coverage_position_texcoord_transform_2.glsl
+++ /dev/null
@@ -1,18 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 position;
-attribute highp float coverage;
-attribute highp vec2 localCoord;
-varying highp vec2 vTransformedCoords_0_Stage0;
-varying highp float vcoverage_Stage0;
-void main() {
- highp vec2 position = position;
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(localCoord, 1.0)).xy;
- vcoverage_Stage0 = coverage;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_hairquad_position.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_hairquad_position.glsl
deleted file mode 100644
index f21c1ab..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_hairquad_position.glsl
+++ /dev/null
@@ -1,14 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 inPosition;
-attribute mediump vec4 inHairQuadEdge;
-varying mediump vec4 vHairQuadEdge_Stage0;
-void main() {
- vHairQuadEdge_Stage0 = inHairQuadEdge;
- highp vec2 pos2 = inPosition;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position.glsl
deleted file mode 100644
index 6a5e9ca..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position.glsl
+++ /dev/null
@@ -1,12 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uViewM_Stage0;
-attribute highp vec2 inPosition;
-void main() {
- highp vec2 pos2 = (uViewM_Stage0 * vec3(inPosition, 1.0)).xy;
- gl_Position = vec4(pos2.x, pos2.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord.glsl
deleted file mode 100644
index 0782274..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord.glsl
+++ /dev/null
@@ -1,13 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-attribute highp vec2 position;
-attribute highp vec2 localCoord;
-varying highp vec2 vlocalCoord_Stage0;
-void main() {
- vlocalCoord_Stage0 = localCoord;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord_2.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord_2.glsl
deleted file mode 100644
index cbb8078..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord_2.glsl
+++ /dev/null
@@ -1,20 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp vec2 uAtlasSizeInv_Stage0;
-attribute highp vec2 inPosition;
-attribute highp vec2 inTextureCoords;
-varying highp vec2 vTextureCoords_Stage0;
-varying highp float vTexIndex_Stage0;
-void main() {
- highp vec2 indexTexCoords = vec2(inTextureCoords.x, inTextureCoords.y);
- highp vec2 unormTexCoords = floor(0.5 * indexTexCoords);
- highp vec2 diff = indexTexCoords - 2.0 * unormTexCoords;
- highp float texIdx = 2.0 * diff.x + diff.y;
- vTextureCoords_Stage0 = unormTexCoords * uAtlasSizeInv_Stage0;
- vTexIndex_Stage0 = texIdx;
- gl_Position = vec4(inPosition.x, inPosition.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord_transform.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord_transform.glsl
deleted file mode 100644
index d60d1d2..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_skia_position_texcoord_transform.glsl
+++ /dev/null
@@ -1,14 +0,0 @@
-#version 100
-
-precision mediump float;
-precision mediump sampler2D;
-uniform highp vec4 sk_RTAdjust;
-uniform highp mat3 uCoordTransformMatrix_0_Stage0;
-attribute highp vec2 position;
-attribute highp vec2 localCoord;
-varying highp vec2 vTransformedCoords_0_Stage0;
-void main() {
- vTransformedCoords_0_Stage0 = (uCoordTransformMatrix_0_Stage0 * vec3(localCoord, 1.0)).xy;
- gl_Position = vec4(position.x, position.y, 0.0, 1.0);
- gl_Position = vec4(gl_Position.xy * sk_RTAdjust.xz + gl_Position.ww * sk_RTAdjust.yw, 0.0, gl_Position.w);
-}
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_rgba.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_rgba.glsl
deleted file mode 100644
index 5797b87..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_rgba.glsl
+++ /dev/null
@@ -1,11 +0,0 @@
-attribute vec3 a_position;
-attribute vec2 a_tex_coord;
-varying vec2 v_tex_coord_rgba;
-uniform vec4 scale_translate_rgba;
-uniform mat4 model_view_projection_transform;
-
-void main() {
- gl_Position = model_view_projection_transform * vec4(a_position.xyz, 1.0);
- v_tex_coord_rgba =
- a_tex_coord * scale_translate_rgba.xy + scale_translate_rgba.zw;
-}
\ No newline at end of file
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_uyvy_1plane.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_uyvy_1plane.glsl
deleted file mode 100644
index 3c0ebfa..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_uyvy_1plane.glsl
+++ /dev/null
@@ -1,10 +0,0 @@
-attribute vec3 a_position;
-attribute vec2 a_tex_coord;
-varying vec2 v_tex_coord_uyvy;
-uniform vec4 scale_translate_uyvy;
-uniform mat4 model_view_projection_transform;
-
-void main() {
- gl_Position = model_view_projection_transform * vec4(a_position.xyz, 1.0);
- v_tex_coord_uyvy = a_tex_coord * scale_translate_uyvy.xy + scale_translate_uyvy.zw;
-}
\ No newline at end of file
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_yuv_2plane.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_yuv_2plane.glsl
deleted file mode 100644
index 8ebde52..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_yuv_2plane.glsl
+++ /dev/null
@@ -1,13 +0,0 @@
-attribute vec3 a_position;
-attribute vec2 a_tex_coord;
-varying vec2 v_tex_coord_y;
-varying vec2 v_tex_coord_uv;
-uniform vec4 scale_translate_y;
-uniform vec4 scale_translate_uv;
-uniform mat4 model_view_projection_transform;
-
-void main() {
- gl_Position = model_view_projection_transform * vec4(a_position.xyz, 1.0);
- v_tex_coord_y = a_tex_coord * scale_translate_y.xy + scale_translate_y.zw;
- v_tex_coord_uv = a_tex_coord * scale_translate_uv.xy + scale_translate_uv.zw;
-}
\ No newline at end of file
diff --git a/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_yuv_3plane.glsl b/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_yuv_3plane.glsl
deleted file mode 100644
index c95ad67..0000000
--- a/cobalt/renderer/glimp_shaders/glsl/vertex_textured_vbo_yuv_3plane.glsl
+++ /dev/null
@@ -1,16 +0,0 @@
-attribute vec3 a_position;
-attribute vec2 a_tex_coord;
-varying vec2 v_tex_coord_y;
-varying vec2 v_tex_coord_u;
-varying vec2 v_tex_coord_v;
-uniform vec4 scale_translate_y;
-uniform vec4 scale_translate_u;
-uniform vec4 scale_translate_v;
-uniform mat4 model_view_projection_transform;
-
-void main() {
- gl_Position = model_view_projection_transform * vec4(a_position.xyz, 1.0);
- v_tex_coord_y = a_tex_coord * scale_translate_y.xy + scale_translate_y.zw;
- v_tex_coord_u = a_tex_coord * scale_translate_u.xy + scale_translate_u.zw;
- v_tex_coord_v = a_tex_coord * scale_translate_v.xy + scale_translate_v.zw;
-}
\ No newline at end of file
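Each deleted textured-VBO shader above maps the single input coordinate `a_tex_coord` to one coordinate per plane with a per-plane affine transform (a restatement of the shader source, with $\mathbf{s}_i$ the matching `scale_translate_*` uniform):

$$
\mathbf{t}_i = \bigl(a_u\, s_{i,x} + s_{i,z},\; a_v\, s_{i,y} + s_{i,w}\bigr), \qquad i \in \{\text{rgba}\},\ \{\text{uyvy}\},\ \{\text{y,uv}\},\ \text{or } \{\text{y,u,v}\},
$$

which lets subsampled chroma planes be addressed from the same vertex data as the luma plane.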
diff --git a/cobalt/renderer/rasterizer/egl/shaders/generate_shader_impl.py b/cobalt/renderer/rasterizer/egl/shaders/generate_shader_impl.py
index b5d3ba8..7450945 100644
--- a/cobalt/renderer/rasterizer/egl/shaders/generate_shader_impl.py
+++ b/cobalt/renderer/rasterizer/egl/shaders/generate_shader_impl.py
@@ -127,7 +127,7 @@
data_definition_string = '{\n'
for output_line_data in GetChunk(file_contents, chunk_size):
data_definition_string += (
- ' ' + ' '.join(['0x%02x,' % ord(y) for y in output_line_data]) + '\n')
+ ' ' + ' '.join([f'0x{ord(y):02x},' for y in output_line_data]) + '\n')
data_definition_string += '};\n'
return data_definition_string
@@ -137,7 +137,7 @@
source_definition_string = ''
for filename in files:
class_name = GetShaderClassName(filename)
- source_definition_string += '\nconst char %s::kSource[] = ' % class_name
+ source_definition_string += f'\nconst char {class_name}::kSource[] = '
source_definition_string += GetDataDefinitionStringForFile(filename)
return source_definition_string
@@ -169,7 +169,7 @@
"""Generate the actual C++ source file."""
header_filename = os.path.basename(header_filename)
current_year = datetime.datetime.now().year
- with open(source_filename, 'w') as output_file:
+ with open(source_filename, 'w', encoding='utf-8') as output_file:
output_file.write(
SOURCE_FILE_TEMPLATE.format(
year=current_year,
@@ -239,7 +239,7 @@
"""Return a string representing C++ methods for the given attributes."""
methods = ''
for index, name in enumerate(attributes):
- methods += '\nGLuint {0}() const {{ return {1}; }}'.format(name, index)
+ methods += f'\nGLuint {name}() const {{ return {index}; }}'
return methods
@@ -248,11 +248,10 @@
methods = ''
for name in uniforms:
base, count = ParseUniformName(name)
- methods += '\nGLuint {0}() const {{ return {0}_; }}'.format(base)
+ methods += f'\nGLuint {base}() const {{ return {base}_; }}'
if count:
methods += (
- '\nstatic constexpr GLsizei {0}_count() {{ return {1}; }}'.format(
- base, count))
+ f'\nstatic constexpr GLsizei {base}_count() {{ return {count}; }}')
return methods
@@ -261,8 +260,7 @@
methods = ''
for index, name in enumerate(samplers):
methods += (
- '\nGLenum {0}_texunit() const {{ return GL_TEXTURE{1}; }}'.format(
- name, index))
+ f'\nGLenum {name}_texunit() const {{ return GL_TEXTURE{index}; }}')
return methods
@@ -274,24 +272,22 @@
if not elements:
continue
- method = '\nGLuint {0}(int index) const {{\n'.format(array_name)
+ method = f'\nGLuint {array_name}(int index) const {{\n'
for index in range(len(elements)):
- method += ' if (index == {0}) return {1}();\n'.format(
- index, elements[index])
- method += ' NOTREACHED();\n return {0}();\n}}'.format(elements[0])
+ method += f' if (index == {index}) return {elements[index]}();\n'
+ method += f' NOTREACHED();\n return {elements[0]}();\n}}'
methods += (method)
if samplers.count(elements[0]) == 0:
continue
- method = '\nGLenum {0}_texunit(int index) const {{\n'.format(array_name)
+ method = f'\nGLenum {array_name}_texunit(int index) const {{\n'
# Generate *_texunit() for samplers.
for index in range(len(elements)):
- method += ' if (index == {0}) return {1}_texunit();\n'.format(
- index, elements[index])
- method += ' NOTREACHED();\n return {0}_texunit();\n}}'.format(elements[0])
+ method += f' if (index == {index}) return {elements[index]}_texunit();\n'
+ method += f' NOTREACHED();\n return {elements[0]}_texunit();\n}}'
methods += (method)
return methods
@@ -301,7 +297,7 @@
"""Returns a string representing C++ statements to process during prelink."""
statements = ''
for name in attributes:
- statements += '\nBindAttribLocation(program, {0}(), "{0}");'.format(name)
+ statements += f'\nBindAttribLocation(program, {name}(), "{name}");'
return statements
@@ -310,7 +306,7 @@
statements = ''
for name in uniforms:
base, unused_count = ParseUniformName(name)
- statements += '\n{0}_ = GetUniformLocation(program, "{0}");'.format(base)
+ statements += f'\n{base}_ = GetUniformLocation(program, "{base}");'
return statements
@@ -319,7 +315,7 @@
statements = ''
for name in samplers:
statements += (
- '\nSetTextureUnitForUniformSampler({0}(), {0}_texunit());'.format(name))
+ f'\nSetTextureUnitForUniformSampler({name}(), {name}_texunit());')
return statements
@@ -328,7 +324,7 @@
variables = ''
for name in variable_names:
base, unused_count = ParseUniformName(name)
- variables += '\nGLuint {0}_;'.format(base)
+ variables += f'\nGLuint {base}_;'
return variables
@@ -413,9 +409,9 @@
def GenerateHeaderFile(output_filename, all_shaders):
"""Generate the actual C++ header file."""
- include_guard = 'GENERATED_%s_H_' % GetBasename(output_filename).upper()
+ include_guard = f'GENERATED_{GetBasename(output_filename).upper()}_H_'
current_year = datetime.datetime.now().year
- with open(output_filename, 'w') as output_file:
+ with open(output_filename, 'w', encoding='utf-8') as output_file:
output_file.write(
HEADER_FILE_TEMPLATE.format(
year=current_year,
@@ -425,7 +421,7 @@
def main(output_header_filename, output_source_filename, shader_files_file):
all_shader_files = []
- with open(shader_files_file, 'r') as input_file:
+ with open(shader_files_file, 'r', encoding='utf-8') as input_file:
shader_files = input_file.read().splitlines()
for filename in shader_files:
# Ignore *.inc files. These are include files and not shader files.
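The f-string conversions above do not change what `generate_shader_impl.py` emits. As a rough, hypothetical illustration of the shape of that output, assembled only from the format strings visible in this diff (every name below is invented, not copied from real generated headers):

```
// Hypothetical sketch of what the generator emits for one shader.
#include <GLES2/gl2.h>

class ShaderVertexExample {
 public:
  // Attributes get fixed indices, bound via BindAttribLocation at prelink.
  GLuint a_position() const { return 0; }
  // Uniforms return members filled in via GetUniformLocation after linking.
  GLuint u_view_matrix() const { return u_view_matrix_; }
  // Sampler uniforms are additionally pinned to a texture unit.
  GLenum texture_texunit() const { return GL_TEXTURE0; }

 private:
  // The shader text is emitted as a hex-byte array named kSource.
  static const char kSource[];
  // One member per uniform location.
  GLuint u_view_matrix_ = 0;
};

// "void" plus a terminator here; the real array holds the whole shader text.
const char ShaderVertexExample::kSource[] = {
    0x76, 0x6f, 0x69, 0x64, 0x00,
};
```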
diff --git a/cobalt/renderer/rasterizer/skia/hardware_mesh.cc b/cobalt/renderer/rasterizer/skia/hardware_mesh.cc
index 39d09af..215cbce 100644
--- a/cobalt/renderer/rasterizer/skia/hardware_mesh.cc
+++ b/cobalt/renderer/rasterizer/skia/hardware_mesh.cc
@@ -20,6 +20,7 @@
#include <utility>
#include <vector>
+#include "base/threading/thread_task_runner_handle.h"
#include "starboard/configuration.h"
namespace cobalt {
@@ -42,7 +43,7 @@
const VertexBufferObject* HardwareMesh::GetVBO() const {
if (!vbo_) {
if (base::MessageLoop::current()) {
- rasterizer_task_runner_ = base::MessageLoop::current()->task_runner();
+ rasterizer_task_runner_ = base::ThreadTaskRunnerHandle::Get();
}
vbo_.reset(new VertexBufferObject(std::move(vertices_), draw_mode_));
}
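Here and in the next few files the diff swaps `base::MessageLoop::current()->task_runner()` for `base::ThreadTaskRunnerHandle::Get()`. A minimal sketch of the cached-task-runner pattern, assuming Chromium's //base as used in this tree (the class, methods, and task below are invented for illustration):

```
#include "base/bind.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"

class LazyResource {
 public:
  void EnsureCreated() {
    // Remember the creating thread's task runner so later work can be posted
    // back to it, without going through base::MessageLoop.
    if (!task_runner_ && base::ThreadTaskRunnerHandle::IsSet())
      task_runner_ = base::ThreadTaskRunnerHandle::Get();
  }

  void PostCleanup() {
    if (task_runner_)
      task_runner_->PostTask(FROM_HERE, base::Bind([] { /* release here */ }));
  }

 private:
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};
```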
diff --git a/cobalt/renderer/rasterizer/skia/hardware_resource_provider.cc b/cobalt/renderer/rasterizer/skia/hardware_resource_provider.cc
index 6b7a91e..83f3dc4 100644
--- a/cobalt/renderer/rasterizer/skia/hardware_resource_provider.cc
+++ b/cobalt/renderer/rasterizer/skia/hardware_resource_provider.cc
@@ -18,6 +18,7 @@
#include <utility>
#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/renderer/backend/egl/graphics_system.h"
@@ -60,7 +61,7 @@
purge_skia_font_caches_on_destruction),
max_texture_size_(gr_context->maxTextureSize()) {
if (base::MessageLoop::current()) {
- rasterizer_task_runner_ = base::MessageLoop::current()->task_runner();
+ rasterizer_task_runner_ = base::ThreadTaskRunnerHandle::Get();
}
// Initialize the font manager now to ensure that it doesn't get initialized
@@ -239,7 +240,9 @@
return render_tree::kMultiPlaneImageFormatYUV3Plane10BitCompactedBT2020;
}
#endif // SB_API_VERSION >= 14
- default: { NOTREACHED(); }
+ default: {
+ NOTREACHED();
+ }
}
return render_tree::kMultiPlaneImageFormatYUV2PlaneBT709;
}
diff --git a/cobalt/renderer/sandbox/renderer_sandbox_main.cc b/cobalt/renderer/sandbox/renderer_sandbox_main.cc
index 5f14c46..7e9a845 100644
--- a/cobalt/renderer/sandbox/renderer_sandbox_main.cc
+++ b/cobalt/renderer/sandbox/renderer_sandbox_main.cc
@@ -17,6 +17,7 @@
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/wrap_main.h"
#include "cobalt/math/size.h"
@@ -88,7 +89,7 @@
g_renderer_sandbox = new RendererSandbox();
DCHECK(g_renderer_sandbox);
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, quit_closure, base::TimeDelta::FromSeconds(30));
}
diff --git a/cobalt/script/v8c/BUILD.gn b/cobalt/script/v8c/BUILD.gn
index 74add0b..3ecb6e0 100644
--- a/cobalt/script/v8c/BUILD.gn
+++ b/cobalt/script/v8c/BUILD.gn
@@ -36,14 +36,19 @@
}
config("common_public_config") {
+ defines = []
if ((current_cpu == "arm64" || current_cpu == "x64") &&
v8_enable_pointer_compression_override) {
- defines = [
+ defines += [
# enables pointer compression on 64 bit platforms for Cobalt.
"V8_COMPRESS_POINTERS",
"V8_31BIT_SMIS_ON_64BIT_ARCH",
]
}
+
+ if (v8_enable_webassembly) {
+ defines += [ "V8_ENABLE_WEBASSEMBLY" ]
+ }
}
config("engine_all_dependent_config") {
diff --git a/cobalt/script/v8c/isolate_fellowship.cc b/cobalt/script/v8c/isolate_fellowship.cc
index 6496c4c..0af59d5 100644
--- a/cobalt/script/v8c/isolate_fellowship.cc
+++ b/cobalt/script/v8c/isolate_fellowship.cc
@@ -43,17 +43,22 @@
// v8 instance is created.
void V8FlagsInit() {
std::vector<std::string> kV8CommandLineFlags = {
- "--optimize_for_size",
- // Starboard disallow rwx memory access.
- "--write_protect_code_memory",
- // Cobalt's TraceMembers and
- // ScriptValue::*Reference do not currently
- // support incremental tracing.
- "--noincremental_marking_wrappers",
- "--noexpose_wasm",
- "--novalidate_asm",
+ "--optimize_for_size",
+      // Starboard disallows rwx memory access.
+ "--write_protect_code_memory",
+ // Cobalt's TraceMembers and
+ // ScriptValue::*Reference do not currently
+ // support incremental tracing.
+ "--noincremental_marking_wrappers",
+ "--novalidate_asm",
};
+#ifdef V8_ENABLE_WEBASSEMBLY
+ kV8CommandLineFlags.push_back("--expose_wasm");
+#else
+ kV8CommandLineFlags.push_back("--noexpose_wasm");
+#endif
+
if (!configuration::Configuration::GetInstance()->CobaltEnableJit()) {
kV8CommandLineFlags.push_back("--jitless");
}
@@ -63,8 +68,7 @@
}
for (auto flag_str : kV8CommandLineFlags) {
- v8::V8::SetFlagsFromString(flag_str.c_str(),
- strlen(flag_str.c_str()));
+ v8::V8::SetFlagsFromString(flag_str.c_str(), strlen(flag_str.c_str()));
}
#if defined(ENABLE_DEBUG_COMMAND_LINE_SWITCHES)
base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
@@ -104,7 +108,6 @@
DCHECK(array_buffer_allocator);
delete array_buffer_allocator;
array_buffer_allocator = nullptr;
-
}
} // namespace v8c
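The reflowed flag list now chooses `--expose_wasm` or `--noexpose_wasm` from the `V8_ENABLE_WEBASSEMBLY` define introduced in `cobalt/script/v8c/BUILD.gn` above. A self-contained sketch of that gating, assuming V8's flag-string API as called in this file (helper names invented):

```
#include <string>
#include <vector>

#include "v8/include/v8.h"

void AppendWasmFlags(std::vector<std::string>* flags) {
#ifdef V8_ENABLE_WEBASSEMBLY
  flags->push_back("--expose_wasm");
#else
  flags->push_back("--noexpose_wasm");
#endif
}

void ApplyV8Flags(const std::vector<std::string>& flags) {
  // Flags must be applied before the first isolate is created.
  for (const std::string& flag : flags) {
    v8::V8::SetFlagsFromString(flag.c_str(), flag.size());
  }
}
```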
diff --git a/cobalt/script/v8c/v8c.cc b/cobalt/script/v8c/v8c.cc
index 5ff424f..ed69e68 100644
--- a/cobalt/script/v8c/v8c.cc
+++ b/cobalt/script/v8c/v8c.cc
@@ -17,6 +17,7 @@
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/strings/string_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/wrap_main.h"
#include "cobalt/script/source_code.h"
#include "cobalt/script/standalone_javascript_runner.h"
@@ -79,7 +80,7 @@
SbTimeMonotonic timestamp) {
DCHECK(!g_javascript_runner);
g_javascript_runner = new cobalt::script::StandaloneJavascriptRunner(
- base::MessageLoop::current()->task_runner());
+ base::ThreadTaskRunnerHandle::Get());
DCHECK(g_javascript_runner);
GlobalEnvironment* global_environment =
diff --git a/cobalt/script/v8c/v8c_global_environment.cc b/cobalt/script/v8c/v8c_global_environment.cc
index 0fd5937..2a56a4d 100644
--- a/cobalt/script/v8c/v8c_global_environment.cc
+++ b/cobalt/script/v8c/v8c_global_environment.cc
@@ -22,6 +22,7 @@
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/cache/cache.h"
+#include "cobalt/configuration/configuration.h"
#include "cobalt/script/javascript_engine.h"
#include "cobalt/script/v8c/embedded_resources.h"
#include "cobalt/script/v8c/entry_scope.h"
@@ -102,7 +103,11 @@
isolate_->SetAllowWasmCodeGenerationCallback(
[](v8::Local<v8::Context> context, v8::Local<v8::String> source) {
+#ifdef V8_ENABLE_WEBASSEMBLY
+ return true;
+#else
return false;
+#endif
});
isolate_->AddMessageListenerWithErrorLevel(
diff --git a/cobalt/script/v8c/v8c_script_debugger.cc b/cobalt/script/v8c/v8c_script_debugger.cc
index 2778aec..197f747 100644
--- a/cobalt/script/v8c/v8c_script_debugger.cc
+++ b/cobalt/script/v8c/v8c_script_debugger.cc
@@ -17,9 +17,11 @@
#include <memory>
#include <sstream>
#include <string>
+#include <utility>
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/script/v8c/conversion_helpers.h"
@@ -45,14 +47,16 @@
span<uint8_t> env = state->tokenizer()->GetEnvelope();
auto res = T::fromBinary(env.data(), env.size());
if (!res) {
- // TODO(caseq): properly plumb an error rather than returning a bogus code.
+ // TODO(caseq): properly plumb an error rather than returning a bogus
+ // code.
state->RegisterError(Error::MESSAGE_MUST_BE_AN_OBJECT);
return false;
}
*value = std::move(res);
return true;
}
- static void Serialize(const std::unique_ptr<T>& value, std::vector<uint8_t>* bytes) {
+ static void Serialize(const std::unique_ptr<T>& value,
+ std::vector<uint8_t>* bytes) {
// Use virtual method, so that outgoing protocol objects could be retained
// by a pointer to ProtocolObject.
value->AppendSerialized(bytes);
@@ -61,16 +65,15 @@
template <typename T>
struct ProtocolTypeTraits<
- T,
- typename std::enable_if<
- std::is_base_of<v8_inspector::protocol::Exported, T>::value>::type> {
+ T, typename std::enable_if<
+ std::is_base_of<v8_inspector::protocol::Exported, T>::value>::type> {
static void Serialize(const T& value, std::vector<uint8_t>* bytes) {
// Use virtual method, so that outgoing protocol objects could be retained
// by a pointer to ProtocolObject.
value.AppendSerialized(bytes);
}
};
-}
+} // namespace v8_crdtp
namespace cobalt {
namespace script {
@@ -78,7 +81,9 @@
namespace {
constexpr const char* kInspectorDomains[] = {
- "Runtime", "Debugger", "Profiler",
+ "Runtime",
+ "Debugger",
+ "Profiler",
};
constexpr int kContextGroupId = 1;
@@ -160,8 +165,7 @@
// TODO: there might be an opportunity to utilize the already encoded json to
// reduce network traffic size on the wire.
v8_crdtp::Status status = v8_crdtp::json::ConvertCBORToJSON(
- v8_crdtp::span<uint8_t>(state.data(), state.size()),
- &state_str);
+ v8_crdtp::span<uint8_t>(state.data(), state.size()), &state_str);
CHECK(status.ok()) << status.Message();
return state_str;
}
@@ -246,8 +250,7 @@
remote_object->AppendSerialized(&out);
std::string remote_object_str;
v8_crdtp::Status status = v8_crdtp::json::ConvertCBORToJSON(
- v8_crdtp::span<uint8_t>(out.data(), out.size()),
- &remote_object_str);
+ v8_crdtp::span<uint8_t>(out.data(), out.size()), &remote_object_str);
CHECK(status.ok()) << status.Message();
return remote_object_str;
}
@@ -278,7 +281,7 @@
V8cValueHandleHolder* retval = holder.get();
// Keep the scoped_ptr alive in a no-op task so the holder stays valid until
// the bindings code gets the v8::Value out of it through the raw pointer.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind([](std::unique_ptr<V8cValueHandleHolder>) {},
base::Passed(&holder)));
return retval;
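The last hunk keeps a `std::unique_ptr` alive by moving it into a no-op task posted to the current thread's task runner. A minimal sketch of that keep-alive idiom, assuming the same //base Bind/Passed helpers used above (the struct and function names are invented):

```
#include <memory>

#include "base/bind.h"
#include "base/location.h"
#include "base/threading/thread_task_runner_handle.h"

struct ValueHolder {
  int value = 0;
};

ValueHolder* LendUntilNextTask(std::unique_ptr<ValueHolder> holder) {
  ValueHolder* raw = holder.get();
  // Ownership moves into a no-op callback, so the object stays alive until
  // the posted task runs on this thread, after the caller is done with `raw`.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind([](std::unique_ptr<ValueHolder>) {},
                            base::Passed(&holder)));
  return raw;
}
```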
diff --git a/cobalt/site/docs/codelabs/starboard_extensions/codelab.md b/cobalt/site/docs/codelabs/starboard_extensions/codelab.md
index 7cac353..09e2870 100644
--- a/cobalt/site/docs/codelabs/starboard_extensions/codelab.md
+++ b/cobalt/site/docs/codelabs/starboard_extensions/codelab.md
@@ -378,18 +378,18 @@
<details>
<summary style="display:list-item">`git diff
- starboard/linux/shared/starboard_platform.gypi`</summary>
+ starboard/linux/shared/BUILD.gn`</summary>
```
-@@ -38,6 +38,8 @@
- '<(DEPTH)/starboard/linux/shared/netlink.cc',
- '<(DEPTH)/starboard/linux/shared/netlink.h',
- '<(DEPTH)/starboard/linux/shared/player_components_factory.cc',
-+ '<(DEPTH)/starboard/linux/shared/pleasantry.cc',
-+ '<(DEPTH)/starboard/linux/shared/pleasantry.h',
- '<(DEPTH)/starboard/linux/shared/routes.cc',
- '<(DEPTH)/starboard/linux/shared/routes.h',
- '<(DEPTH)/starboard/linux/shared/system_get_connection_type.cc',
+@@ -71,6 +71,8 @@ static_library("starboard_platform_sources") {
+ "//starboard/linux/shared/netlink.cc",
+ "//starboard/linux/shared/netlink.h",
+ "//starboard/linux/shared/player_components_factory.cc",
++ "//starboard/linux/shared/pleasantry.cc",
++ "//starboard/linux/shared/pleasantry.h",
+ "//starboard/linux/shared/routes.cc",
+ "//starboard/linux/shared/routes.h",
+ "//starboard/linux/shared/soft_mic_platform_service.cc",
```
</details>
@@ -399,14 +399,15 @@
starboard/linux/shared/system_get_extensions.cc`</summary>
```
-@@ -16,12 +16,14 @@
-
- #include "starboard/extension/configuration.h"
- #include "starboard/extension/crash_handler.h"
+@@ -22,6 +22,7 @@
+ #include "starboard/extension/free_space.h"
+ #include "starboard/extension/memory_mapped_file.h"
+ #include "starboard/extension/platform_service.h"
+#include "starboard/extension/pleasantry.h"
- #include "starboard/common/string.h"
- #include "starboard/shared/starboard/crash_handler.h"
- #if SB_IS(EVERGREEN_COMPATIBLE)
+ #include "starboard/linux/shared/soft_mic_platform_service.h"
+ #include "starboard/shared/enhanced_audio/enhanced_audio.h"
+ #include "starboard/shared/ffmpeg/ffmpeg_demuxer.h"
+@@ -33,6 +34,7 @@
#include "starboard/elf_loader/evergreen_config.h"
#endif
#include "starboard/linux/shared/configuration.h"
@@ -414,15 +415,16 @@
const void* SbSystemGetExtension(const char* name) {
#if SB_IS(EVERGREEN_COMPATIBLE)
-@@ -41,5 +43,8 @@ const void* SbSystemGetExtension(const char* name) {
- if (strcmp(name, kStarboardExtensionCrashHandlerName) == 0) {
- return starboard::common::GetCrashHandlerApi();
+@@ -74,5 +76,8 @@ const void* SbSystemGetExtension(const char* name) {
+ return use_ffmpeg_demuxer ? starboard::shared::ffmpeg::GetFFmpegDemuxerApi()
+ : NULL;
}
+ if (strcmp(name, kStarboardExtensionPleasantryName) == 0) {
+ return starboard::shared::GetPleasantryApi();
+ }
return NULL;
}
+
```
</details>
@@ -432,12 +434,11 @@
</summary>
```
-@@ -18,7 +18,9 @@
- #include "cobalt/base/wrap_main.h"
+@@ -19,6 +19,8 @@
#include "cobalt/browser/application.h"
#include "cobalt/browser/switches.h"
-+#include "starboard/extension/pleasantry.h"
#include "cobalt/version.h"
++#include "starboard/extension/pleasantry.h"
+#include "starboard/system.h"
namespace {
@@ -605,12 +606,11 @@
</summary>
```
-@@ -18,7 +18,9 @@
- #include "cobalt/base/wrap_main.h"
+@@ -19,6 +19,8 @@
#include "cobalt/browser/application.h"
#include "cobalt/browser/switches.h"
-+#include "starboard/extension/pleasantry.h"
#include "cobalt/version.h"
++#include "starboard/extension/pleasantry.h"
+#include "starboard/system.h"
namespace {
@@ -630,11 +630,13 @@
void PreloadApplication(int argc, char** argv, const char* link,
const base::Closure& quit_closure,
SbTimeMonotonic timestamp) {
-@@ -77,6 +87,12 @@ void StartApplication(int argc, char** argv, const char* link,
+@@ -77,6 +87,14 @@ void StartApplication(int argc, char** argv, const char* link,
return;
}
LOG(INFO) << "Starting application.";
-+ const StarboardExtensionPleasantryApi* pleasantry_extension = GetPleasantryApi();
++ const StarboardExtensionPleasantryApi* pleasantry_extension =
++ static_cast<const StarboardExtensionPleasantryApi*>(
++ SbSystemGetExtension(kStarboardExtensionPleasantryName));
+ if (pleasantry_extension &&
+ strcmp(pleasantry_extension->name, kStarboardExtensionPleasantryName) == 0 &&
+ pleasantry_extension->version >= 1) {
@@ -643,7 +645,7 @@
#if SB_API_VERSION >= 13
DCHECK(!g_application);
g_application = new cobalt::browser::Application(quit_closure,
-@@ -96,7 +112,14 @@ void StartApplication(int argc, char** argv, const char* link,
+@@ -96,7 +114,14 @@ void StartApplication(int argc, char** argv, const char* link,
}
void StopApplication() {
@@ -664,7 +666,7 @@
</details>
`starboard/linux/shared/pleasantry.h`,
-`starboard/linux/shared/starboard_platform.gypi`, and
+`starboard/linux/shared/BUILD.gn`, and
`starboard/linux/shared/system_get_extensions.cc` should be unchanged from the
Exercise 1 solution.
@@ -710,8 +712,8 @@
`starboard/extension/extension_test.cc`.
Once you've written your test you can execute it to confirm that it passes.
-`starboard/extension/extension.gyp` configures an `extension_test` target to be
-built from our `extension_test.cc` source file. We can build that target for our
+`starboard/extension/BUILD.gn` configures an `extension_test` target to be built
+from our `extension_test.cc` source file. We can build that target for our
platform and then run the executable to run the tests.
```
@@ -775,15 +777,9 @@
**If you'd like to contribute an actual Starboard Extension to Cobalt in order to
add some useful functionality for your platform, we encourage you to start a
-discussion with the Cobalt team before you begin coding.** To do so, please
-[file a feature request](https://issuetracker.google.com/issues/new?component=181120)
-for the extension and include the following information:
-
-* The name of the Starboard Extension.
-* A description of the extension.
-* Why a Starboard Extension is the right tool, instead of some alternative.
-* The fact that you'd like to contribute the extension (i.e., write the code)
- rather than rely on the Cobalt team to prioritize, plan, and implement it.
+discussion with the Cobalt team before you begin coding.** To do so, please file
+a feature request for the extension
+[using this template](https://issuetracker.google.com/issues/new?component=181120&template=1820891).
Please file this feature request with the appropriate priority and the Cobalt
team will review the proposal accordingly. If the Cobalt team approves of the
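
As a companion to the codelab's testing step above ("Once you've written your test you can execute it to confirm that it passes"), here is a sketch of the kind of check that could live in `starboard/extension/extension_test.cc` for the codelab's Pleasantry extension. The constant and field names follow the snippets shown earlier in this diff; the exact struct layout is defined in `starboard/extension/pleasantry.h`, so treat this as an illustration rather than the codelab's reference solution:

```
#include "starboard/extension/pleasantry.h"
#include "starboard/system.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(ExtensionTest, VerifyPleasantry) {
  auto* extension = static_cast<const StarboardExtensionPleasantryApi*>(
      SbSystemGetExtension(kStarboardExtensionPleasantryName));
  // Platforms that do not implement the extension simply return NULL.
  if (!extension) {
    return;
  }
  EXPECT_STREQ(kStarboardExtensionPleasantryName, extension->name);
  EXPECT_GE(extension->version, 1u);
}
```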
diff --git a/cobalt/site/docs/development/setup-linux.md b/cobalt/site/docs/development/setup-linux.md
index 78ecf3f..0acdd64 100644
--- a/cobalt/site/docs/development/setup-linux.md
+++ b/cobalt/site/docs/development/setup-linux.md
@@ -19,7 +19,7 @@
```
$ sudo apt update && sudo apt install -qqy --no-install-recommends \
- pkgconf ninja-build bison yasm binutils clang libgles2-mesa-dev \
+ pkgconf ninja-build bison nasm yasm binutils clang libgles2-mesa-dev \
mesa-common-dev libpulse-dev libavresample-dev libasound2-dev \
libxrender-dev libxcomposite-dev libxml2-dev curl git \
python3.8-venv libxi-dev
diff --git a/cobalt/site/docs/development/setup-rdk.md b/cobalt/site/docs/development/setup-rdk.md
new file mode 100644
index 0000000..6a16461
--- /dev/null
+++ b/cobalt/site/docs/development/setup-rdk.md
@@ -0,0 +1,8 @@
+---
+layout: doc
+title: "Set up your environment - RDK"
+---
+
+Detailed documentation for building and using the RDK-V reference port is
+provided by the RDK project. Please see
+[starboard/contrib/RDK](starboard/contrib/RDK/) for details.
diff --git a/cobalt/site/docs/gen/starboard/doc/building.md b/cobalt/site/docs/gen/starboard/doc/building.md
index 0281e71..99fecd9 100644
--- a/cobalt/site/docs/gen/starboard/doc/building.md
+++ b/cobalt/site/docs/gen/starboard/doc/building.md
@@ -75,9 +75,7 @@
Define a subclass of
`starboard.build.application_configuration.ApplicationConfiguration` and
- override any desired methods. In particular, you probably at least want to
- override the `GetDefaultTargetBuildFile()` method to point at your root
- `.gyp` file from step 1.
+ override any desired methods.
3. Register your Application in your `starboard_configuration.py` file in your
source tree root.
diff --git a/cobalt/site/docs/reference/starboard/gn-configuration.md b/cobalt/site/docs/reference/starboard/gn-configuration.md
index 2fff0ac..a0bbde6 100644
--- a/cobalt/site/docs/reference/starboard/gn-configuration.md
+++ b/cobalt/site/docs/reference/starboard/gn-configuration.md
@@ -28,7 +28,6 @@
| **`sb_enable_cpp17_audit`**<br><br> Enables an NPLB audit of C++17 support.<br><br>The default value is `true`. |
| **`sb_enable_lib`**<br><br> Enables embedding Cobalt as a shared library within another app. This requires a 'lib' starboard implementation for the corresponding platform.<br><br>The default value is `false`. |
| **`sb_enable_opus_sse`**<br><br> Enables optimizations on SSE compatible platforms.<br><br>The default value is `true`. |
-| **`sb_evergreen_compatible_enable_lite`**<br><br> Whether to adopt Evergreen Lite on the Evergreen compatible platform.<br><br>The default value is `false`. |
| **`sb_evergreen_compatible_package`**<br><br> Whether to generate the whole package containing both Loader app and Cobalt core on the Evergreen compatible platform.<br><br>The default value is `false`. |
| **`sb_evergreen_compatible_use_libunwind`**<br><br> Whether to use the libunwind library on Evergreen compatible platform.<br><br>The default value is `false`. |
| **`sb_filter_based_player`**<br><br> Used to indicate that the player is filter based.<br><br>The default value is `true`. |
diff --git a/cobalt/site/docs/reference/starboard/modules/memory.md b/cobalt/site/docs/reference/starboard/modules/memory.md
index e09dbc3..2fbf7dc 100644
--- a/cobalt/site/docs/reference/starboard/modules/memory.md
+++ b/cobalt/site/docs/reference/starboard/modules/memory.md
@@ -233,19 +233,6 @@
void SbMemoryFreeAligned(void *memory)
```
-### SbMemoryGetStackBounds ###
-
-Gets the stack bounds for the current thread.
-
-`out_high`: The highest addressable byte + 1 for the current thread. `out_low`:
-The lowest addressable byte for the current thread.
-
-#### Declaration ####
-
-```
-void SbMemoryGetStackBounds(void **out_high, void **out_low)
-```
-
### SbMemoryMap ###
Allocates `size_bytes` worth of physical memory pages and maps them into an
@@ -345,4 +332,3 @@
```
bool SbMemoryUnmap(void *virtual_address, int64_t size_bytes)
```
-
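
The `SbMemoryMap` section retained above pairs with `SbMemoryUnmap`; a brief usage sketch, assuming the declarations and constants in `starboard/memory.h` (`kSbMemoryMapProtectReadWrite`, `SB_MEMORY_MAP_FAILED`):

```
#include "starboard/memory.h"

bool MapWriteUnmap() {
  const int64_t kSize = 4096;  // One page on most platforms.
  void* region =
      SbMemoryMap(kSize, kSbMemoryMapProtectReadWrite, "example_region");
  if (region == SB_MEMORY_MAP_FAILED) {
    return false;
  }
  // Pages stay readable and writable until they are unmapped.
  static_cast<char*>(region)[0] = 1;
  return SbMemoryUnmap(region, kSize);
}
```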
diff --git a/cobalt/speech/BUILD.gn b/cobalt/speech/BUILD.gn
index 199796a..3f4cc72 100644
--- a/cobalt/speech/BUILD.gn
+++ b/cobalt/speech/BUILD.gn
@@ -86,8 +86,6 @@
"speech_synthesis_utterance.cc",
"speech_synthesis_utterance.h",
"speech_synthesis_voice.h",
- "starboard_speech_recognizer.cc",
- "starboard_speech_recognizer.h",
]
configs += [ ":speech_config" ]
diff --git a/cobalt/speech/google_speech_service.cc b/cobalt/speech/google_speech_service.cc
index 1cb7e36..1ee9e4b 100644
--- a/cobalt/speech/google_speech_service.cc
+++ b/cobalt/speech/google_speech_service.cc
@@ -23,15 +23,16 @@
//
// Talk with your Google representative about how to get speech-api quota.
-#include <memory>
-
#include "cobalt/speech/google_speech_service.h"
+#include <memory>
+
#include "base/bind.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/language.h"
#include "cobalt/loader/fetcher_factory.h"
#include "cobalt/network/network_module.h"
@@ -193,7 +194,7 @@
ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)),
ALLOW_THIS_IN_INITIALIZER_LIST(
weak_this_(weak_ptr_factory_.GetWeakPtr())),
- wrappables_task_runner_(base::MessageLoop::current()->task_runner()) {
+ wrappables_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
thread_.StartWithOptions(
base::Thread::Options(base::MessageLoop::TYPE_IO, 0));
}
diff --git a/cobalt/speech/sandbox/speech_sandbox_main.cc b/cobalt/speech/sandbox/speech_sandbox_main.cc
index 55e0481..cc67b41 100644
--- a/cobalt/speech/sandbox/speech_sandbox_main.cc
+++ b/cobalt/speech/sandbox/speech_sandbox_main.cc
@@ -15,6 +15,7 @@
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "base/strings/string_number_conversions.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/wrap_main.h"
#include "cobalt/speech/sandbox/speech_sandbox.h"
#include "url/gurl.h"
@@ -48,7 +49,7 @@
base::FilePath(FILE_PATH_LITERAL("speech_sandbox_trace.json")));
if (timeout != 0) {
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, quit_closure, base::TimeDelta::FromSeconds(timeout));
}
}
diff --git a/cobalt/speech/speech_configuration.h b/cobalt/speech/speech_configuration.h
index 6d9cd90..a7a1b49 100644
--- a/cobalt/speech/speech_configuration.h
+++ b/cobalt/speech/speech_configuration.h
@@ -20,8 +20,4 @@
#define SB_USE_SB_MICROPHONE 1
-#if SB_API_VERSION == 12 || (SB_HAS(SPEECH_RECOGNIZER) && SB_API_VERSION < 13)
-#define SB_USE_SB_SPEECH_RECOGNIZER 1
-#endif // SB_API_VERSION == 12 || SB_HAS(SPEECH_RECOGNIZER)
-
#endif // COBALT_SPEECH_SPEECH_CONFIGURATION_H_
diff --git a/cobalt/speech/speech_recognition_manager.cc b/cobalt/speech/speech_recognition_manager.cc
index 218346a..6043ffc 100644
--- a/cobalt/speech/speech_recognition_manager.cc
+++ b/cobalt/speech/speech_recognition_manager.cc
@@ -15,13 +15,11 @@
#include "cobalt/speech/speech_recognition_manager.h"
#include "base/bind.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "cobalt/speech/cobalt_speech_recognizer.h"
#include "cobalt/speech/speech_configuration.h"
#include "cobalt/speech/speech_recognition_error.h"
#include "cobalt/web/dom_exception.h"
-#if defined(SB_USE_SB_SPEECH_RECOGNIZER)
-#include "cobalt/speech/starboard_speech_recognizer.h"
-#endif
-#include "cobalt/speech/cobalt_speech_recognizer.h"
namespace cobalt {
namespace speech {
@@ -31,17 +29,9 @@
const Microphone::Options& microphone_options)
: ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)),
weak_this_(weak_ptr_factory_.GetWeakPtr()),
- main_message_loop_task_runner_(
- base::MessageLoop::current()->task_runner()),
+ main_message_loop_task_runner_(base::ThreadTaskRunnerHandle::Get()),
event_callback_(event_callback),
state_(kStopped) {
-#if defined(SB_USE_SB_SPEECH_RECOGNIZER)
- if (StarboardSpeechRecognizer::IsSupported()) {
- recognizer_.reset(new StarboardSpeechRecognizer(base::Bind(
- &SpeechRecognitionManager::OnEventAvailable, base::Unretained(this))));
- return;
- }
-#endif // defined(SB_USE_SB_SPEECH_RECOGNIZER)
if (GoogleSpeechService::GetSpeechAPIKey()) {
recognizer_.reset(new CobaltSpeechRecognizer(
network_module, microphone_options,
diff --git a/cobalt/speech/starboard_speech_recognizer.cc b/cobalt/speech/starboard_speech_recognizer.cc
deleted file mode 100644
index cc37dcc..0000000
--- a/cobalt/speech/starboard_speech_recognizer.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2017 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "cobalt/speech/starboard_speech_recognizer.h"
-
-#if defined(SB_USE_SB_SPEECH_RECOGNIZER)
-
-#include <utility>
-
-#include "cobalt/base/tokens.h"
-#include "cobalt/speech/speech_recognition_error.h"
-#include "cobalt/speech/speech_recognition_event.h"
-#include "starboard/common/log.h"
-#include "starboard/types.h"
-
-namespace cobalt {
-namespace speech {
-
-// static
-bool StarboardSpeechRecognizer::IsSupported() {
-#if SB_API_VERSION >= 13
- return false;
-#else
- return SbSpeechRecognizerIsSupported();
-#endif
-}
-
-// static
-void StarboardSpeechRecognizer::OnSpeechDetected(void* context, bool detected) {
- StarboardSpeechRecognizer* recognizer =
- static_cast<StarboardSpeechRecognizer*>(context);
- recognizer->message_loop_->task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&StarboardSpeechRecognizer::OnRecognizerSpeechDetected,
- recognizer->weak_factory_.GetWeakPtr(), detected));
-}
-
-// static
-void StarboardSpeechRecognizer::OnError(void* context,
- SbSpeechRecognizerError error) {
- StarboardSpeechRecognizer* recognizer =
- static_cast<StarboardSpeechRecognizer*>(context);
- recognizer->message_loop_->task_runner()->PostTask(
- FROM_HERE, base::Bind(&StarboardSpeechRecognizer::OnRecognizerError,
- recognizer->weak_factory_.GetWeakPtr(), error));
-}
-
-// static
-void StarboardSpeechRecognizer::OnResults(void* context,
- SbSpeechResult* results,
- int results_size, bool is_final) {
- StarboardSpeechRecognizer* recognizer =
- static_cast<StarboardSpeechRecognizer*>(context);
-
- std::vector<SpeechRecognitionAlternative::Data> results_copy;
- results_copy.reserve(results_size);
- for (int i = 0; i < results_size; ++i) {
- results_copy.emplace_back(SpeechRecognitionAlternative::Data{
- results[i].transcript, results[i].confidence});
- }
-
- recognizer->message_loop_->task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&StarboardSpeechRecognizer::OnRecognizerResults,
- recognizer->weak_factory_.GetWeakPtr(),
- std::move(results_copy), is_final));
-}
-
-StarboardSpeechRecognizer::StarboardSpeechRecognizer(
- const EventCallback& event_callback)
- : SpeechRecognizer(event_callback),
- message_loop_(base::MessageLoop::current()),
- weak_factory_(this) {
- SbSpeechRecognizerHandler handler = {&OnSpeechDetected, &OnError, &OnResults,
- this};
- speech_recognizer_ = SbSpeechRecognizerCreate(&handler);
-
- if (!SbSpeechRecognizerIsValid(speech_recognizer_)) {
- scoped_refptr<web::Event> error_event(new SpeechRecognitionError(
- kSpeechRecognitionErrorCodeServiceNotAllowed, ""));
- RunEventCallback(error_event);
- }
-}
-
-StarboardSpeechRecognizer::~StarboardSpeechRecognizer() {
- if (SbSpeechRecognizerIsValid(speech_recognizer_)) {
- SbSpeechRecognizerDestroy(speech_recognizer_);
- }
-}
-
-void StarboardSpeechRecognizer::Start(const SpeechRecognitionConfig& config) {
- SB_DCHECK(config.max_alternatives < INT_MAX);
- SbSpeechConfiguration configuration = {
- config.continuous, config.interim_results,
- static_cast<int>(config.max_alternatives)};
- if (SbSpeechRecognizerIsValid(speech_recognizer_)) {
- SbSpeechRecognizerStart(speech_recognizer_, &configuration);
- }
-}
-
-void StarboardSpeechRecognizer::Stop() {
- if (SbSpeechRecognizerIsValid(speech_recognizer_)) {
- SbSpeechRecognizerStop(speech_recognizer_);
- }
- // Clear the final results.
- final_results_.clear();
-}
-
-void StarboardSpeechRecognizer::OnRecognizerSpeechDetected(bool detected) {
- scoped_refptr<web::Event> event(new web::Event(
- detected ? base::Tokens::soundstart() : base::Tokens::soundend()));
- RunEventCallback(event);
-}
-
-void StarboardSpeechRecognizer::OnRecognizerError(
- SbSpeechRecognizerError error) {
- scoped_refptr<web::Event> error_event;
- switch (error) {
- case kSbNoSpeechError:
- error_event =
- new SpeechRecognitionError(kSpeechRecognitionErrorCodeNoSpeech, "");
- break;
- case kSbAborted:
- error_event =
- new SpeechRecognitionError(kSpeechRecognitionErrorCodeAborted, "");
- break;
- case kSbAudioCaptureError:
- error_event = new SpeechRecognitionError(
- kSpeechRecognitionErrorCodeAudioCapture, "");
- break;
- case kSbNetworkError:
- error_event =
- new SpeechRecognitionError(kSpeechRecognitionErrorCodeNetwork, "");
- break;
- case kSbNotAllowed:
- error_event =
- new SpeechRecognitionError(kSpeechRecognitionErrorCodeNotAllowed, "");
- break;
- case kSbServiceNotAllowed:
- error_event = new SpeechRecognitionError(
- kSpeechRecognitionErrorCodeServiceNotAllowed, "");
- break;
- case kSbBadGrammar:
- error_event =
- new SpeechRecognitionError(kSpeechRecognitionErrorCodeBadGrammar, "");
- break;
- case kSbLanguageNotSupported:
- error_event = new SpeechRecognitionError(
- kSpeechRecognitionErrorCodeLanguageNotSupported, "");
- break;
- }
- SB_DCHECK(error_event);
- RunEventCallback(error_event);
-}
-
-void StarboardSpeechRecognizer::OnRecognizerResults(
- std::vector<SpeechRecognitionAlternative::Data>&& results, bool is_final) {
- SpeechRecognitionResultList::SpeechRecognitionResults recognition_results;
- SpeechRecognitionResult::SpeechRecognitionAlternatives alternatives;
- for (auto& result : results) {
- scoped_refptr<SpeechRecognitionAlternative> alternative(
- new SpeechRecognitionAlternative(std::move(result)));
- alternatives.push_back(alternative);
- }
- scoped_refptr<SpeechRecognitionResult> recognition_result(
- new SpeechRecognitionResult(alternatives, is_final));
- recognition_results.push_back(recognition_result);
-
- // Gather all results for the SpeechRecognitionEvent, including all final
- // results we've previously accumulated, plus all (final or not) results that
- // we just received.
- SpeechRecognitionResults success_results;
- size_t total_size = final_results_.size() + recognition_results.size();
- success_results.reserve(total_size);
- success_results = final_results_;
- success_results.insert(success_results.end(), recognition_results.begin(),
- recognition_results.end());
-
- size_t result_index = final_results_.size();
- // Update final results list with final results that we just received, so we
- // have them for the next event.
- for (size_t i = 0; i < recognition_results.size(); ++i) {
- if (recognition_results[i]->is_final()) {
- final_results_.push_back(recognition_results[i]);
- }
- }
-
- scoped_refptr<SpeechRecognitionResultList> recognition_list(
- new SpeechRecognitionResultList(success_results));
- scoped_refptr<SpeechRecognitionEvent> recognition_event(
- new SpeechRecognitionEvent(SpeechRecognitionEvent::kResult,
- static_cast<uint32>(result_index),
- recognition_list));
- RunEventCallback(recognition_event);
-}
-
-} // namespace speech
-} // namespace cobalt
-
-#endif // defined(SB_USE_SB_SPEECH_RECOGNIZER)
diff --git a/cobalt/speech/starboard_speech_recognizer.h b/cobalt/speech/starboard_speech_recognizer.h
deleted file mode 100644
index 80b4ca0..0000000
--- a/cobalt/speech/starboard_speech_recognizer.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2017 The Cobalt Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COBALT_SPEECH_STARBOARD_SPEECH_RECOGNIZER_H_
-#define COBALT_SPEECH_STARBOARD_SPEECH_RECOGNIZER_H_
-
-#include <vector>
-
-#include "base/message_loop/message_loop.h"
-#include "cobalt/speech/speech_configuration.h"
-#include "cobalt/speech/speech_recognition_result_list.h"
-#include "cobalt/speech/speech_recognizer.h"
-
-#if defined(SB_USE_SB_SPEECH_RECOGNIZER)
-
-#include "starboard/speech_recognizer.h"
-
-namespace cobalt {
-namespace speech {
-
-// Controls |SbSpeechRecognizer| to access the device's recognition service and
-// receives the speech recognition results from there.
-class StarboardSpeechRecognizer : public SpeechRecognizer {
- public:
- typedef SpeechRecognitionResultList::SpeechRecognitionResults
- SpeechRecognitionResults;
-
- explicit StarboardSpeechRecognizer(const EventCallback& event_callback);
- ~StarboardSpeechRecognizer();
-
- static bool IsSupported();
-
- void Start(const SpeechRecognitionConfig& config) override;
- void Stop() override;
-
- private:
- static void OnSpeechDetected(void* context, bool detected);
- void OnRecognizerSpeechDetected(bool detected);
- static void OnError(void* context, SbSpeechRecognizerError error);
- void OnRecognizerError(SbSpeechRecognizerError error);
- static void OnResults(void* context, SbSpeechResult* results,
- int results_size, bool is_final);
- void OnRecognizerResults(
- std::vector<SpeechRecognitionAlternative::Data>&& results, bool is_final);
-
- SbSpeechRecognizer speech_recognizer_;
-
- // Used for accumulating final results.
- SpeechRecognitionResults final_results_;
-
- // Track the message loop that created this object so that our callbacks can
- // post back to it.
- base::MessageLoop* message_loop_;
-
- // We have our callbacks post events back to us using weak pointers, in case
- // this object is destroyed while those tasks are in flight. Note that it
- // is impossible for the callbacks to be called after this object is
- // destroyed, since SbSpeechRecognizerDestroy() ensures this.
- base::WeakPtrFactory<StarboardSpeechRecognizer> weak_factory_;
-};
-
-} // namespace speech
-} // namespace cobalt
-
-#endif // defined(SB_USE_SB_SPEECH_RECOGNIZER)
-
-#endif // COBALT_SPEECH_STARBOARD_SPEECH_RECOGNIZER_H_
diff --git a/cobalt/system_window/system_window.cc b/cobalt/system_window/system_window.cc
index f2edc9b..bedd522 100644
--- a/cobalt/system_window/system_window.cc
+++ b/cobalt/system_window/system_window.cc
@@ -121,15 +121,7 @@
// Use the current time unless it was overridden.
SbTimeMonotonic timestamp = 0;
-#if SB_API_VERSION >= 13
timestamp = event->timestamp;
-#else // SB_API_VERSION >= 13
- bool use_input_timestamp =
- SbSystemHasCapability(kSbSystemCapabilitySetsInputTimestamp);
- if (use_input_timestamp) {
- timestamp = data.timestamp;
- }
-#endif // SB_API_VERSION >= 13
if (timestamp == 0) {
timestamp = SbTimeGetMonotonicNow();
}
diff --git a/cobalt/tools/automated_testing/c_val_names.py b/cobalt/tools/automated_testing/c_val_names.py
index a394cad..57aad1c 100644
--- a/cobalt/tools/automated_testing/c_val_names.py
+++ b/cobalt/tools/automated_testing/c_val_names.py
@@ -1,4 +1,17 @@
"""Provides names for Cobalt CVals that are used by webdriver-related tests."""
+# Copyright 2018 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from __future__ import absolute_import
from __future__ import division
@@ -62,7 +75,7 @@
def event_value_dictionary(event_type):
- return 'Event.MainWebModule.{}.ValueDictionary'.format(event_type)
+ return f'Event.MainWebModule.{event_type}.ValueDictionary'
def is_render_tree_generation_pending():
diff --git a/cobalt/tools/automated_testing/cobalt_runner.py b/cobalt/tools/automated_testing/cobalt_runner.py
index 3366638..82db23a 100644
--- a/cobalt/tools/automated_testing/cobalt_runner.py
+++ b/cobalt/tools/automated_testing/cobalt_runner.py
@@ -98,7 +98,9 @@
log_file=None,
target_params=None,
success_message=None,
- log_handler=None):
+ log_handler=None,
+ poll_until_wait_seconds=POLL_UNTIL_WAIT_SECONDS,
+ **kwargs):
"""CobaltRunner constructor.
Args:
@@ -110,6 +112,8 @@
with.
success_message: Optional success message to be printed on successful
exit.
+ poll_until_wait_seconds: Seconds to wait while polling for an event.
+ **kwargs: Additional parameters to be passed to the launcher.
"""
# Tracks if test execution started successfully
@@ -129,6 +133,8 @@
self.launcher_params = launcher_params
self.log_handler = log_handler
+ self.poll_until_wait_seconds = poll_until_wait_seconds
+ self.kwargs = kwargs
if log_file:
self.log_file = open(log_file, encoding='utf-8') # pylint: disable=consider-using-with
@@ -265,7 +271,8 @@
out_directory=self.launcher_params.out_directory,
loader_platform=self.launcher_params.loader_platform,
loader_config=self.launcher_params.loader_config,
- loader_out_directory=self.launcher_params.loader_out_directory)
+ loader_out_directory=self.launcher_params.loader_out_directory,
+ **self.kwargs)
self.runner_thread = threading.Thread(target=self._RunLauncher)
self.runner_thread.start()
@@ -475,7 +482,7 @@
"""
start_time = time.time()
while (not self.FindElements(css_selector) and
- (time.time() - start_time < POLL_UNTIL_WAIT_SECONDS)):
+ (time.time() - start_time < self.poll_until_wait_seconds)):
time.sleep(0.5)
if expected_num:
self.FindElements(css_selector, expected_num)
diff --git a/cobalt/tools/automated_testing/webdriver_utils.py b/cobalt/tools/automated_testing/webdriver_utils.py
index 14ddf38..b799cc5 100644
--- a/cobalt/tools/automated_testing/webdriver_utils.py
+++ b/cobalt/tools/automated_testing/webdriver_utils.py
@@ -1,4 +1,17 @@
"""This module provides webdriver-based utility functions."""
+# Copyright 2018 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from __future__ import absolute_import
from __future__ import division
@@ -25,7 +38,7 @@
else:
module_path = 'selenium'
# As of this writing, Google uses selenium 3.0.0b2 internally, so
- # thats what we will target here as well.
+ # that's what we will target here as well.
try:
module = importlib.import_module(module_path)
if submodule is None:
@@ -33,9 +46,8 @@
if not module.__version__.startswith('3.'):
raise ImportError('Not version 3.x.x')
except ImportError:
- sys.stderr.write('Could not import {}\n'
+ sys.stderr.write(f'Could not import {module_path}\n'
'Please install selenium >= 3.0.0b2.\n'
- 'Commonly: \"sudo pip install \'selenium>=3.0.0b2\'\"\n'
- .format(module_path))
+ 'Commonly: \"sudo pip install \'selenium>=3.0.0b2\'\"\n')
sys.exit(1)
return module
diff --git a/cobalt/tools/buildbot/run_black_box_tests.py b/cobalt/tools/buildbot/run_black_box_tests.py
new file mode 100644
index 0000000..439cd2b
--- /dev/null
+++ b/cobalt/tools/buildbot/run_black_box_tests.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+#
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Compatibility layer for buildbot."""
+
+import sys
+from internal.cobalt.tools.buildbot.run_black_box_tests import main
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/cobalt/tools/buildbot/run_evergreen_tests.py b/cobalt/tools/buildbot/run_evergreen_tests.py
new file mode 100644
index 0000000..9707a31
--- /dev/null
+++ b/cobalt/tools/buildbot/run_evergreen_tests.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+#
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Compatibility layer for buildbot."""
+
+import sys
+from internal.cobalt.tools.buildbot.run_evergreen_tests import main
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/cobalt/tools/buildbot/run_py_tests.py b/cobalt/tools/buildbot/run_py_tests.py
new file mode 100644
index 0000000..bde3767
--- /dev/null
+++ b/cobalt/tools/buildbot/run_py_tests.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+#
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Compatibility layer for buildbot."""
+
+from internal.cobalt.tools.buildbot.run_py_tests import main
+
+if __name__ == '__main__':
+ main()
diff --git a/cobalt/tools/buildbot/run_unit_tests.py b/cobalt/tools/buildbot/run_unit_tests.py
new file mode 100644
index 0000000..d09fa41
--- /dev/null
+++ b/cobalt/tools/buildbot/run_unit_tests.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Compatibility layer for buildbot."""
+
+import logging
+import sys
+from internal.cobalt.tools.buildbot.run_unit_tests import main
+
+if __name__ == '__main__':
+ try:
+ sys.exit(int(main()))
+ except Exception as err: # pylint: disable=broad-except
+ logging.exception('Exception happened during main: %s', str(err))
+ sys.exit(1)
diff --git a/cobalt/tools/collectd/cobalt.py b/cobalt/tools/collectd/cobalt.py
index dd38f9e..b0c7343 100644
--- a/cobalt/tools/collectd/cobalt.py
+++ b/cobalt/tools/collectd/cobalt.py
@@ -11,11 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# pylint: disable=redefined-outer-name,redefined-builtin,broad-except,unused-argument
+'''Collectd module for Cobalt'''
import json
import time
import argparse
-import random
import websocket
try:
@@ -27,14 +28,13 @@
def config(conf):
- print('config called {!r}'.format(conf))
+ print(f'config called {conf!r}')
conf_['collectd_conf'] = conf
- collectd.info('config called {!r}'.format(conf))
+ collectd.info(f'config called {conf!r}')
def reconnect():
- ws = websocket.create_connection('ws://{}:{}'.format(conf_['host'],
- conf_['port']))
+ ws = websocket.create_connection(f"ws://{conf_['host']}:{conf_['port']}")
ws.settimeout(3)
setattr(ws, 'message_id', 1)
conf_['ws'] = ws
@@ -43,7 +43,7 @@
def init():
conf = conf_['collectd_conf']
for child in conf.children:
- collectd.info('conf.child key {} values {}'.format(child.key, child.values))
+ collectd.info(f'conf.child key {child.key} values {child.values}')
if child.key.lower() == 'address':
host, port = child.values[0].split(':')
conf_['host'] = host
@@ -67,7 +67,7 @@
if 'result' in parsed_message and parsed_message['id'] == result_id:
matching_result = parsed_message
break
- except:
+ except Exception:
break
return (matching_result, messages)
@@ -101,13 +101,13 @@
# collectd.warning('key {} value: {}'.format(key, value))
report_value('cobalt', collectd_type, key, value)
except TypeError:
- collectd.warning('Failed to collect: {} {}'.format(key, val))
+ collectd.warning(f'Failed to collect: {key} {val}')
# Tuple of a Cobalt Cval, and data type
-# Collectd frontends like CGP group values to graphs by the reported data type. The types here are
-# simple gauges grouped by their rough estimated orders of magnitude, so that all plots are still
-# somewhat discernible.
+# Collectd frontends like CGP group values to graphs by the reported data type.
+# The types here are simple gauges grouped by their rough estimated orders of
+# magnitude, so that all plots are still somewhat discernible.
tracked_stats = [('Cobalt.Lifetime', 'lifetime'),
('Count.DOM.ActiveJavaScriptEvents', 'gauge'),
('Count.DOM.Attrs', 'gauge_10k'),
@@ -152,24 +152,28 @@
collectd.register_init(init)
collectd.register_read(read)
-
# Debugcode to verify plugin connection
if __name__ == '__main__':
+
class Bunch:
- def __init__(self, **kwds):
- self.__dict__.update(kwds)
- def debugprint(*args,**kwargs):
- print('{!r} {!r}'.format(args,kwargs))
+
+ def __init__(self, **kwds):
+ self.__dict__.update(kwds)
+
+ def debugprint(*args, **kwargs):
+ print(f'{args!r} {kwargs!r}')
+
def debugvalues(**kwargs):
debugprint(**kwargs)
- return Bunch(dispatch=debugprint,**kwargs)
- collectd = Bunch(info=debugprint,warning=debugprint,Values=debugvalues)
+ return Bunch(dispatch=debugprint, **kwargs)
+
+ collectd = Bunch(info=debugprint, warning=debugprint, Values=debugvalues)
parser = argparse.ArgumentParser()
parser.add_argument('--host', default='localhost')
parser.add_argument('--port', type=int, default=9222)
args = parser.parse_args()
- conf = Bunch(children=[Bunch( key='address',
- values=['{}:{}'.format(args.host,args.port)])])
- conf_['collectd_conf'] = conf
+ conf = Bunch(
+ children=[Bunch(key='address', values=[f'{args.host}:{args.port}'])])
+ conf_['collectd_conf'] = conf
init()
cobalt_read()
diff --git a/cobalt/tools/verify-trace-members/verify-trace-members.py b/cobalt/tools/verify-trace-members/verify-trace-members.py
index 6692482..3643891 100755
--- a/cobalt/tools/verify-trace-members/verify-trace-members.py
+++ b/cobalt/tools/verify-trace-members/verify-trace-members.py
@@ -13,6 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# pylint: disable=invalid-name
"""A wrapper around the native-trace-members binary
This script is responsible for:
@@ -63,7 +64,7 @@
break
if not found_dir:
- logging.error('At least one of {} must exist.'.format(LINUX_OUT_DIRS))
+ logging.error('At least one of %s must exist.', LINUX_OUT_DIRS)
sys.exit(1)
@@ -72,9 +73,10 @@
to build the target "all".
"""
- p = subprocess.Popen(['ninja', '-t', 'commands', 'all'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ p = subprocess.Popen( # pylint: disable=consider-using-with
+ ['ninja', '-t', 'commands', 'all'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
assert len(stderr) == 0
assert p.returncode == 0
@@ -194,12 +196,14 @@
# TODO: Accept list of files as arguments.
if not source_file.startswith('../../cobalt/dom/'):
continue
- p = subprocess.Popen(
- tool_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p = subprocess.Popen( # pylint: disable=consider-using-with
+ tool_command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if len(stderr.decode()) != 0 or p.returncode != 0:
logging.error(stderr)
- exit(1)
+ exit(1) # pylint: disable=consider-using-sys-exit
for line in stdout.decode().splitlines():
# Load and then dump to JSON to standardize w.r.t. to whitespace/order, so
# we can use the stringified object itself as a key into a set.
@@ -239,24 +243,22 @@
if message_type == 'needsTraceMembersDeclaration':
parent_class_friendly = suggestion['parentClassFriendly']
field_name = suggestion['fieldName']
- print('{} needs to declare TraceMembers because of field {}'.format(
- parent_class_friendly, field_name))
+ print(f'{parent_class_friendly} needs to declare TraceMembers because '
+ f'of field {field_name}')
print(' void TraceMembers(script::Tracer* tracer) override;')
elif message_type == 'needsTracerTraceField':
parent_class_friendly = suggestion['parentClassFriendly']
field_name = suggestion['fieldName']
- print('{} needs to trace field {}'.format(parent_class_friendly,
- field_name))
- print(' tracer->Trace({});'.format(field_name))
+ print(f'{parent_class_friendly} needs to trace field {field_name}')
+ print(f' tracer->Trace({field_name});')
elif message_type == 'needsCallBaseTraceMembers':
parent_class_friendly = suggestion['parentClassFriendly']
base_names = suggestion['baseNames']
- print(
- '{} needs to call base class TraceMembers in its TraceMembers'.format(
- parent_class_friendly))
+ print(f'{parent_class_friendly} needs to call base class TraceMembers '
+ 'in its TraceMembers')
print('Something like (this is probably over-qualified):')
for base_name in base_names:
- print(' {}::TraceMembers(tracer);'.format(base_name))
+ print(f' {base_name}::TraceMembers(tracer);')
else:
assert False
diff --git a/cobalt/tools/webdriver_benchmark_config.py b/cobalt/tools/webdriver_benchmark_config.py
deleted file mode 100644
index f551f35..0000000
--- a/cobalt/tools/webdriver_benchmark_config.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Copyright 2017 The Cobalt Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Class for configuring Webdriver Benchmarks."""
-
-# WEBDRIVER SCRIPT CONFIGURATION PARAMETERS
-MINIMAL_SIZE = 'minimal'
-REDUCED_SIZE = 'reduced'
-STANDARD_SIZE = 'standard'
-SAMPLE_SIZES = [MINIMAL_SIZE, REDUCED_SIZE, STANDARD_SIZE]
-
-DISABLE_VIDEOS = '--disable_videos'
-
-# COBALT COMMAND LINE PARAMETERS
-DISABLE_SPLASH_SCREEN_ON_RELOADS = '--disable_splash_screen_on_reloads'
diff --git a/cobalt/trace_event/scoped_trace_to_file.cc b/cobalt/trace_event/scoped_trace_to_file.cc
index ffbe948..453b013 100644
--- a/cobalt/trace_event/scoped_trace_to_file.cc
+++ b/cobalt/trace_event/scoped_trace_to_file.cc
@@ -12,15 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <memory>
-
#include "cobalt/trace_event/scoped_trace_to_file.h"
+#include <memory>
+
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/cobalt_paths.h"
#include "cobalt/trace_event/json_file_outputter.h"
@@ -71,7 +72,7 @@
void TraceToFileForDuration(const base::FilePath& output_path_relative_to_logs,
const base::TimeDelta& duration) {
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::Bind(&EndTimedTrace,
base::Passed(base::WrapUnique(
diff --git a/cobalt/ui_navigation/interface.cc b/cobalt/ui_navigation/interface.cc
index da1f51f..2dd0fb0 100644
--- a/cobalt/ui_navigation/interface.cc
+++ b/cobalt/ui_navigation/interface.cc
@@ -157,13 +157,8 @@
interface.set_item_container_item = sb_ui_interface.set_item_container_item;
interface.set_item_content_offset = sb_ui_interface.set_item_content_offset;
interface.get_item_content_offset = sb_ui_interface.get_item_content_offset;
-#if SB_API_VERSION >= 13
interface.set_item_focus_duration = sb_ui_interface.set_item_focus_duration;
interface.do_batch_update = sb_ui_interface.do_batch_update;
-#else
- interface.set_item_focus_duration = &SetItemFocusDuration;
- interface.do_batch_update = &DoBatchUpdate;
-#endif
return interface;
}
diff --git a/cobalt/ui_navigation/nav_item.cc b/cobalt/ui_navigation/nav_item.cc
index 21874ac..d946463 100644
--- a/cobalt/ui_navigation/nav_item.cc
+++ b/cobalt/ui_navigation/nav_item.cc
@@ -186,11 +186,9 @@
void NavItem::UnfocusAll() {
starboard::ScopedSpinLock lock(&g_pending_updates_lock);
g_pending_focus = kNativeItemInvalid;
-#if SB_API_VERSION >= 13
g_pending_updates->emplace_back(
kNativeItemInvalid,
base::Bind(GetInterface().set_focus, kNativeItemInvalid));
-#endif
}
void NavItem::SetEnabled(bool enabled) {
diff --git a/cobalt/ui_navigation/scroll_engine/BUILD.gn b/cobalt/ui_navigation/scroll_engine/BUILD.gn
index 0c3cd62..980b3fd 100644
--- a/cobalt/ui_navigation/scroll_engine/BUILD.gn
+++ b/cobalt/ui_navigation/scroll_engine/BUILD.gn
@@ -28,3 +28,15 @@
"//cobalt/ui_navigation",
]
}
+
+target(gtest_target_type, "scroll_engine_tests") {
+ testonly = true
+ sources = [ "free_scrolling_nav_item_test.cc" ]
+ deps = [
+ ":scroll_engine",
+ "//cobalt/base",
+ "//cobalt/test:run_all_unittests",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+}
diff --git a/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.cc b/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.cc
index 29eb3ae..dd03104 100644
--- a/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.cc
+++ b/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.cc
@@ -31,7 +31,7 @@
initial_offset_(initial_offset),
target_offset_(target_offset),
animation_duration_(animation_duration) {
- initial_change_ = base::Time::Now();
+ initial_change_ = clock_->Now();
// Constants are derived from the ease-in-out curve definition here:
// https://www.w3.org/TR/2023/CRD-css-easing-1-20230213/#typedef-cubic-bezier-easing-function
@@ -45,7 +45,7 @@
return 1.f;
}
- auto now = base::Time::Now();
+ auto now = clock_->Now();
auto time_delta = now - initial_change_;
auto fraction_of_progress =
time_delta.InMillisecondsF() / animation_duration_.InMillisecondsF();
diff --git a/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.h b/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.h
index cbd8592..05005e0 100644
--- a/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.h
+++ b/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.h
@@ -15,6 +15,9 @@
#ifndef COBALT_UI_NAVIGATION_SCROLL_ENGINE_FREE_SCROLLING_NAV_ITEM_H_
#define COBALT_UI_NAVIGATION_SCROLL_ENGINE_FREE_SCROLLING_NAV_ITEM_H_
+#include "base/time/clock.h"
+#include "base/time/default_clock.h"
+#include "base/time/time.h"
#include "cobalt/cssom/timing_function.h"
#include "cobalt/math/vector2d_f.h"
#include "cobalt/ui_navigation/nav_item.h"
@@ -37,6 +40,11 @@
bool AnimationIsComplete();
math::Vector2dF GetCurrentOffset();
+ void set_clock_for_testing(const base::Clock* clock) { clock_ = clock; }
+ void set_initial_change_for_testing(const base::Time initial_change) {
+ initial_change_ = initial_change;
+ }
+
private:
scoped_refptr<NavItem> nav_item_;
math::Vector2dF initial_offset_;
@@ -44,6 +52,7 @@
scoped_refptr<cssom::CubicBezierTimingFunction> animation_function_;
base::TimeDelta animation_duration_;
base::Time initial_change_;
+ const base::Clock* clock_ = base::DefaultClock::GetInstance();
};
} // namespace scroll_engine
diff --git a/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item_test.cc b/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item_test.cc
new file mode 100644
index 0000000..f405a2d
--- /dev/null
+++ b/cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item_test.cc
@@ -0,0 +1,80 @@
+// Copyright 2023 The Cobalt Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "cobalt/ui_navigation/scroll_engine/free_scrolling_nav_item.h"
+
+#include "base/test/simple_test_clock.h"
+#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+
+namespace cobalt {
+namespace ui_navigation {
+namespace scroll_engine {
+
+namespace {
+
+constexpr base::TimeDelta kTestDuration = base::TimeDelta::FromSeconds(5);
+
+class FreeScrollingNavItemTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ free_scrolling_nav_item.set_clock_for_testing(&test_clock);
+ free_scrolling_nav_item.set_initial_change_for_testing(test_clock.Now());
+ }
+
+ scoped_refptr<NavItem> mock_nav_item_;
+ math::Vector2dF initial_offset_{0.f, 0.f};
+ math::Vector2dF target_offset_{10.f, 10.f};
+ base::TimeDelta animation_duration_{kTestDuration};
+ float animation_slope_{0.5f};
+ base::SimpleTestClock test_clock;
+ FreeScrollingNavItem free_scrolling_nav_item{
+ mock_nav_item_, initial_offset_, target_offset_, animation_duration_,
+ animation_slope_};
+};
+
+} // namespace
+
+TEST_F(FreeScrollingNavItemTest, GetFractionOfCurrentProgressTest) {
+ EXPECT_EQ(free_scrolling_nav_item.GetFractionOfCurrentProgress(), 0.f);
+ test_clock.Advance(kTestDuration / 2);
+ EXPECT_EQ(free_scrolling_nav_item.GetFractionOfCurrentProgress(), 0.5f);
+ test_clock.Advance(kTestDuration / 2);
+ EXPECT_EQ(free_scrolling_nav_item.GetFractionOfCurrentProgress(), 1.f);
+ test_clock.Advance(kTestDuration / 2);
+ EXPECT_EQ(free_scrolling_nav_item.GetFractionOfCurrentProgress(), 1.f);
+}
+
+TEST_F(FreeScrollingNavItemTest, AnimationIsCompleteTest) {
+ EXPECT_FALSE(free_scrolling_nav_item.AnimationIsComplete());
+ test_clock.Advance(kTestDuration / 2);
+ EXPECT_FALSE(free_scrolling_nav_item.AnimationIsComplete());
+ test_clock.Advance(kTestDuration / 2);
+ EXPECT_TRUE(free_scrolling_nav_item.AnimationIsComplete());
+}
+
+TEST_F(FreeScrollingNavItemTest, GetCurrentOffsetTest) {
+ EXPECT_EQ(free_scrolling_nav_item.GetCurrentOffset(), initial_offset_);
+ test_clock.Advance(kTestDuration);
+ EXPECT_EQ(free_scrolling_nav_item.GetCurrentOffset(), target_offset_);
+ test_clock.Advance(kTestDuration);
+ EXPECT_EQ(free_scrolling_nav_item.GetCurrentOffset(), target_offset_);
+}
+
+} // namespace scroll_engine
+} // namespace ui_navigation
+} // namespace cobalt
diff --git a/cobalt/updater/configurator.cc b/cobalt/updater/configurator.cc
index 027a68a..4fbcd1b 100644
--- a/cobalt/updater/configurator.cc
+++ b/cobalt/updater/configurator.cc
@@ -22,9 +22,12 @@
#include "components/update_client/patcher.h"
#include "components/update_client/protocol_handler.h"
#include "components/update_client/unzipper.h"
+#include "starboard/common/system_property.h"
#include "starboard/system.h"
#include "url/gurl.h"
+using starboard::kSystemPropertyMaxLength;
+
namespace {
// Default time constants.
@@ -35,7 +38,6 @@
const char kOmahaCobaltAppID[] = "{6D4E53F3-CC64-4CB8-B6BD-AB0B8F300E1C}";
std::string GetDeviceProperty(SbSystemPropertyId id) {
- const size_t kSystemPropertyMaxLength = 1024;
char value[kSystemPropertyMaxLength];
bool result;
result = SbSystemGetProperty(id, value, kSystemPropertyMaxLength);
diff --git a/cobalt/updater/network_fetcher.cc b/cobalt/updater/network_fetcher.cc
index c70b636..2f0019e 100644
--- a/cobalt/updater/network_fetcher.cc
+++ b/cobalt/updater/network_fetcher.cc
@@ -189,6 +189,7 @@
url_fetcher_->SetRequestContext(
network_module_->url_request_context_getter().get());
+ network_module_->AddClientHintHeaders(*url_fetcher_);
// Request mode is kCORSModeOmitCredentials.
const uint32 kDisableCookiesAndCacheLoadFlags =
diff --git a/cobalt/updater/one_app_only_sandbox.cc b/cobalt/updater/one_app_only_sandbox.cc
index 33e6478..0ded6a7 100644
--- a/cobalt/updater/one_app_only_sandbox.cc
+++ b/cobalt/updater/one_app_only_sandbox.cc
@@ -92,20 +92,10 @@
return;
}
LOG(INFO) << "Starting application.";
-#if SB_API_VERSION >= 13
DCHECK(!g_application);
g_application = new cobalt::browser::Application(
quit_closure, false /*not_preload*/, timestamp);
DCHECK(g_application);
-#else
- if (!g_application) {
- g_application = new cobalt::browser::Application(
- quit_closure, false /*should_preload*/, timestamp);
- DCHECK(g_application);
- } else {
- g_application->Start(timestamp);
- }
-#endif // SB_API_VERSION >= 13
}
void StopApplication() {
diff --git a/cobalt/updater/updater_module.cc b/cobalt/updater/updater_module.cc
index 2305b02..98cbb29 100644
--- a/cobalt/updater/updater_module.cc
+++ b/cobalt/updater/updater_module.cc
@@ -147,11 +147,6 @@
updater_thread_->task_runner()->PostTask(
FROM_HERE,
base::Bind(&UpdaterModule::Initialize, base::Unretained(this)));
-
- // Mark the current installation as successful.
- updater_thread_->task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&UpdaterModule::MarkSuccessful, base::Unretained(this)));
}
UpdaterModule::~UpdaterModule() {
@@ -238,8 +233,14 @@
}
void UpdaterModule::MarkSuccessful() {
+ updater_thread_->task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&UpdaterModule::MarkSuccessfulImpl, base::Unretained(this)));
+}
+
+void UpdaterModule::MarkSuccessfulImpl() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- LOG(INFO) << "UpdaterModule::MarkSuccessful";
+ LOG(INFO) << "UpdaterModule::MarkSuccessfulImpl";
auto installation_manager =
static_cast<const CobaltExtensionInstallationManagerApi*>(
diff --git a/cobalt/updater/updater_module.h b/cobalt/updater/updater_module.h
index f88ed46..7bb9136 100644
--- a/cobalt/updater/updater_module.h
+++ b/cobalt/updater/updater_module.h
@@ -156,6 +156,8 @@
bool GetUseCompressedUpdates() const;
void SetUseCompressedUpdates(bool use_compressed_updates);
+ void MarkSuccessful();
+
private:
std::unique_ptr<base::Thread> updater_thread_;
scoped_refptr<update_client::UpdateClient> update_client_;
@@ -173,7 +175,7 @@
void Initialize();
void Finalize();
- void MarkSuccessful();
+ void MarkSuccessfulImpl();
void Update();
};
diff --git a/cobalt/web/agent.cc b/cobalt/web/agent.cc
index 9914d4d..4341600 100644
--- a/cobalt/web/agent.cc
+++ b/cobalt/web/agent.cc
@@ -20,6 +20,7 @@
#include "base/observer_list.h"
#include "base/threading/thread_checker.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/startup_timer.h"
#include "cobalt/loader/fetcher_factory.h"
@@ -290,7 +291,7 @@
// are added after the global object is created.
if (!options.injected_global_object_attributes.empty()) {
DCHECK(base::MessageLoop::current());
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&Impl::InjectGlobalObjectAttributes, base::Unretained(this),
options.injected_global_object_attributes));
diff --git a/cobalt/web/cobalt_ua_data_values.idl b/cobalt/web/cobalt_ua_data_values.idl
index d8832df..399651a 100644
--- a/cobalt/web/cobalt_ua_data_values.idl
+++ b/cobalt/web/cobalt_ua_data_values.idl
@@ -22,6 +22,8 @@
DOMString evergreenType;
DOMString evergreenFileType;
DOMString evergreenVersion;
+ DOMString firmwareVersionDetails;
+ DOMString osExperience;
DOMString starboardVersion;
DOMString originalDesignManufacturer;
DOMString deviceType;
diff --git a/cobalt/web/cobalt_ua_data_values_interface.cc b/cobalt/web/cobalt_ua_data_values_interface.cc
index d1dd39a..47e23dd 100644
--- a/cobalt/web/cobalt_ua_data_values_interface.cc
+++ b/cobalt/web/cobalt_ua_data_values_interface.cc
@@ -67,6 +67,12 @@
if (init_dict.has_evergreen_version()) {
evergreen_version_ = init_dict.evergreen_version();
}
+ if (init_dict.has_firmware_version_details()) {
+ firmware_version_details_ = init_dict.firmware_version_details();
+ }
+ if (init_dict.has_os_experience()) {
+ os_experience_ = init_dict.os_experience();
+ }
if (init_dict.has_starboard_version()) {
starboard_version_ = init_dict.starboard_version();
}
diff --git a/cobalt/web/cobalt_ua_data_values_interface.h b/cobalt/web/cobalt_ua_data_values_interface.h
index 15612a1..81dcc70 100644
--- a/cobalt/web/cobalt_ua_data_values_interface.h
+++ b/cobalt/web/cobalt_ua_data_values_interface.h
@@ -48,6 +48,10 @@
return evergreen_file_type_;
}
const std::string& evergreen_version() const { return evergreen_version_; }
+ const std::string& firmware_version_details() const {
+ return firmware_version_details_;
+ }
+ const std::string& os_experience() const { return os_experience_; }
const std::string& starboard_version() const { return starboard_version_; }
const std::string& original_design_manufacturer() const {
return original_design_manufacturer_;
@@ -79,6 +83,8 @@
std::string evergreen_type_;
std::string evergreen_file_type_;
std::string evergreen_version_;
+ std::string firmware_version_details_;
+ std::string os_experience_;
std::string starboard_version_;
std::string original_design_manufacturer_;
std::string device_type_;
diff --git a/cobalt/web/cobalt_ua_data_values_interface.idl b/cobalt/web/cobalt_ua_data_values_interface.idl
index 6243b22..fc670c2 100644
--- a/cobalt/web/cobalt_ua_data_values_interface.idl
+++ b/cobalt/web/cobalt_ua_data_values_interface.idl
@@ -32,6 +32,8 @@
readonly attribute DOMString evergreenType;
readonly attribute DOMString evergreenFileType;
readonly attribute DOMString evergreenVersion;
+ readonly attribute DOMString firmwareVersionDetails;
+ readonly attribute DOMString osExperience;
readonly attribute DOMString starboardVersion;
readonly attribute DOMString originalDesignManufacturer;
readonly attribute DOMString deviceType;
diff --git a/cobalt/web/csp_delegate.cc b/cobalt/web/csp_delegate.cc
index 3fe54b5..92d0520 100644
--- a/cobalt/web/csp_delegate.cc
+++ b/cobalt/web/csp_delegate.cc
@@ -80,8 +80,8 @@
if (csp_header_policy_ == csp::kCSPRequired || should_allow) {
return should_allow;
} else {
- DLOG(WARNING) << "Page must include Content-Security-Policy header, it "
- "will fail to load in production builds of Cobalt!";
+ LOG(WARNING) << "Page must include Content-Security-Policy header, it "
+ "will fail to load in production builds of Cobalt!";
}
}
@@ -185,7 +185,7 @@
} else {
// Didn't find Content-Security-Policy header.
if (!headers.content_security_policy_report_only().empty()) {
- DLOG(INFO)
+ LOG(INFO)
<< "Content-Security-Policy-Report-Only headers were "
"received, but Content-Security-Policy headers are required.";
}
diff --git a/cobalt/web/event_target.cc b/cobalt/web/event_target.cc
index eaea80d..f270d18 100644
--- a/cobalt/web/event_target.cc
+++ b/cobalt/web/event_target.cc
@@ -21,6 +21,7 @@
#include "base/bind_helpers.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/script/environment_settings.h"
@@ -159,7 +160,7 @@
if (!base::MessageLoop::current()) {
return;
}
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
location,
base::Bind(base::IgnoreResult(&EventTarget::DispatchEventAndRunCallback),
base::AsWeakPtr<EventTarget>(this), event, callback));
@@ -171,7 +172,7 @@
if (!base::MessageLoop::current()) {
return;
}
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
location,
base::Bind(
base::IgnoreResult(&EventTarget::DispatchEventNameAndRunCallback),
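
The event_target.cc hunk above, like the webdriver and worker hunks later in this diff, replaces base::MessageLoop::current()->task_runner() with base::ThreadTaskRunnerHandle::Get(), which returns the task runner bound to the calling thread. A minimal sketch of the pattern, assuming base/threading/thread_task_runner_handle.h (the helper function is illustrative only):

    #include <utility>

    #include "base/callback.h"
    #include "base/location.h"
    #include "base/threading/thread_task_runner_handle.h"

    void PostToCurrentThread(base::OnceClosure work) {
      // Requires that the current thread has a task runner handle installed,
      // e.g. by running a base::MessageLoop on it.
      base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, std::move(work));
    }
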
diff --git a/cobalt/web/navigator_base.cc b/cobalt/web/navigator_base.cc
index 4922cb8..e4f9f07 100644
--- a/cobalt/web/navigator_base.cc
+++ b/cobalt/web/navigator_base.cc
@@ -55,13 +55,7 @@
return user_agent_data_;
}
-bool NavigatorBase::on_line() const {
-#if SB_API_VERSION >= 13
- return !SbSystemNetworkIsDisconnected();
-#else
- return true;
-#endif
-}
+bool NavigatorBase::on_line() const { return !SbSystemNetworkIsDisconnected(); }
} // namespace web
} // namespace cobalt
diff --git a/cobalt/web/navigator_ua_data.cc b/cobalt/web/navigator_ua_data.cc
index 52ac278..4ca973c 100644
--- a/cobalt/web/navigator_ua_data.cc
+++ b/cobalt/web/navigator_ua_data.cc
@@ -60,6 +60,9 @@
platform_info->evergreen_file_type());
all_high_entropy_values_.set_evergreen_version(
platform_info->evergreen_version());
+ all_high_entropy_values_.set_firmware_version_details(
+ platform_info->firmware_version_details());
+ all_high_entropy_values_.set_os_experience(platform_info->os_experience());
all_high_entropy_values_.set_starboard_version(
platform_info->starboard_version());
all_high_entropy_values_.set_original_design_manufacturer(
diff --git a/cobalt/web/testing/mock_user_agent_platform_info.h b/cobalt/web/testing/mock_user_agent_platform_info.h
index 4f87cfd..1ef4301 100644
--- a/cobalt/web/testing/mock_user_agent_platform_info.h
+++ b/cobalt/web/testing/mock_user_agent_platform_info.h
@@ -40,9 +40,11 @@
base::Optional<std::string> original_design_manufacturer() const override {
return optional_empty_string_;
}
+#if SB_API_VERSION < 15
SbSystemDeviceType device_type() const override {
return kSbSystemDeviceTypeUnknown;
}
+#endif
const std::string& device_type_string() const override {
return empty_string_;
}
@@ -73,6 +75,10 @@
const std::string& evergreen_version() const override {
return empty_string_;
}
+ const std::string& firmware_version_details() const override {
+ return empty_string_;
+ }
+ const std::string& os_experience() const override { return empty_string_; }
const std::string& cobalt_version() const override { return empty_string_; }
const std::string& cobalt_build_version_number() const override {
return empty_string_;
diff --git a/cobalt/web/user_agent_platform_info.h b/cobalt/web/user_agent_platform_info.h
index 4d7e51a..6ee6e9c 100644
--- a/cobalt/web/user_agent_platform_info.h
+++ b/cobalt/web/user_agent_platform_info.h
@@ -32,7 +32,9 @@
virtual const std::string& starboard_version() const = 0;
virtual const std::string& os_name_and_version() const = 0;
virtual base::Optional<std::string> original_design_manufacturer() const = 0;
+#if SB_API_VERSION < 15
virtual SbSystemDeviceType device_type() const = 0;
+#endif
virtual const std::string& device_type_string() const = 0;
virtual base::Optional<std::string> chipset_model_number() const = 0;
virtual base::Optional<std::string> model_year() const = 0;
@@ -45,6 +47,8 @@
virtual const std::string& evergreen_type() const = 0;
virtual const std::string& evergreen_file_type() const = 0;
virtual const std::string& evergreen_version() const = 0;
+ virtual const std::string& firmware_version_details() const = 0;
+ virtual const std::string& os_experience() const = 0;
virtual const std::string& cobalt_version() const = 0;
virtual const std::string& cobalt_build_version_number() const = 0;
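
With the SB_API_VERSION guards above, the enum-based device_type() accessor is only part of UserAgentPlatformInfo before Starboard 15, while the string accessor device_type_string() stays unconditional. A hedged sketch of how a caller of this interface might branch on the same macro (variable names are illustrative only):

    #if SB_API_VERSION < 15
      // Pre-SB15: the Starboard enum is still available.
      SbSystemDeviceType device_type_enum = platform_info->device_type();
    #endif
      // All versions: the string form of the device type.
      const std::string& device_type = platform_info->device_type_string();
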
diff --git a/cobalt/webdriver/BUILD.gn b/cobalt/webdriver/BUILD.gn
index e275c91..bce97f0 100644
--- a/cobalt/webdriver/BUILD.gn
+++ b/cobalt/webdriver/BUILD.gn
@@ -31,6 +31,7 @@
"//cobalt/script",
"//net",
"//net:http_server",
+ "//starboard/common",
"//third_party/icu:icuuc",
"//url",
]
diff --git a/cobalt/webdriver/algorithms.cc b/cobalt/webdriver/algorithms.cc
index e4c6a5b..155e605 100644
--- a/cobalt/webdriver/algorithms.cc
+++ b/cobalt/webdriver/algorithms.cc
@@ -455,7 +455,7 @@
}
// There is a spec for "displayedness" available:
-// https://w3c.github.io/webdriver/webdriver-spec.html#element-displayedness
+// https://www.w3.org/TR/2015/WD-webdriver-20150808/#element-displayedness
// However, the algorithm described in the spec does not match existing
// implementations of WebDriver.
// IsDisplayed will match the existing implementations, using the implementation
diff --git a/cobalt/webdriver/algorithms.h b/cobalt/webdriver/algorithms.h
index d75d3b2..df7f2cc 100644
--- a/cobalt/webdriver/algorithms.h
+++ b/cobalt/webdriver/algorithms.h
@@ -24,13 +24,13 @@
namespace algorithms {
// Implementation of getElementText algorithm.
-// https://w3c.github.io/webdriver/webdriver-spec.html#getelementtext
+// https://www.w3.org/TR/2015/WD-webdriver-20150808/#getelementtext
// The spec is not totally clear and, according to comments on the spec, does
// not exactly match the behavior of existing WebDriver implementations. This
// implementation will follow the de-facto standards where they differ.
std::string GetElementText(dom::Element* element);
-// https://w3c.github.io/webdriver/webdriver-spec.html#element-displayedness
+// https://www.w3.org/TR/2015/WD-webdriver-20150808/#element-displayedness
// The spec does not exactly match the behavior of existing WebDriver
// implementations. Consistency with existing implementations will be preferred
// over strict conformance to the draft spec.
diff --git a/cobalt/webdriver/dispatcher.h b/cobalt/webdriver/dispatcher.h
index 56796ba..ae29bb1 100644
--- a/cobalt/webdriver/dispatcher.h
+++ b/cobalt/webdriver/dispatcher.h
@@ -76,8 +76,8 @@
};
// Send the result of the execution of a registered WebDriver command to be
// sent as a response as described in the spec:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Responses
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Failed-Commands
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#responses
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#failed-commands
virtual void SendResult(
const base::Optional<protocol::SessionId>& session_id,
protocol::Response::StatusCode status_code,
@@ -95,7 +95,7 @@
// Some forms of Invalid Requests are detected in the CommandCallback by
// checking the path variables and command parameters. Invalid requests are
// described here:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Invalid-Requests
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#invalid-requests
//
// TODO: Invalid requests should be handled before calling the
// CommandCallback.
diff --git a/cobalt/webdriver/element_driver.cc b/cobalt/webdriver/element_driver.cc
index 0f36d4b..3cbc361 100644
--- a/cobalt/webdriver/element_driver.cc
+++ b/cobalt/webdriver/element_driver.cc
@@ -12,10 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <memory>
-
#include "cobalt/webdriver/element_driver.h"
+#include <memory>
+
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/cssom/property_value.h"
#include "cobalt/cssom/viewport_size.h"
#include "cobalt/dom/document.h"
@@ -225,14 +226,14 @@
}
dom::Element* ElementDriver::GetWeakElement() {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), element_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), element_task_runner_);
return element_.get();
}
util::CommandResult<void> ElementDriver::SendKeysInternal(
std::unique_ptr<Keyboard::KeyboardEventVector> events) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), element_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), element_task_runner_);
if (!element_) {
return CommandResult(protocol::Response::kStaleElementReference);
}
@@ -257,7 +258,7 @@
util::CommandResult<void> ElementDriver::SendClickInternal(
const protocol::Button& button) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), element_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), element_task_runner_);
if (!element_) {
return CommandResult(protocol::Response::kStaleElementReference);
}
@@ -266,14 +267,14 @@
return CommandResult(protocol::Response::kElementNotVisible);
}
// Click on an element.
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidelementidclick
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidelementidclick
// The Element Click clicks the in-view center point of the element
- // https://w3c.github.io/webdriver/webdriver-spec.html#dfn-element-click
+ // https://www.w3.org/TR/2015/WD-webdriver-20150808/#click
// An element's in-view center point is the origin position of the rectangle
// that is the intersection between the element's first DOM client rectangle
// and the initial viewport.
- // https://w3c.github.io/webdriver/webdriver-spec.html#dfn-in-view-center-point
+ // https://www.w3.org/TR/2017/WD-webdriver-20170125/#dfn-in-view-center-point
scoped_refptr<dom::DOMRectList> dom_rects = element_->GetClientRects();
if (dom_rects->length() == 0) {
return CommandResult(protocol::Response::kElementNotVisible);
@@ -325,7 +326,7 @@
util::CommandResult<std::string> ElementDriver::RequestScreenshotInternal(
Screenshot::GetScreenshotFunction get_screenshot_function) {
typedef util::CommandResult<std::string> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), element_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), element_task_runner_);
if (!element_) {
return CommandResult(protocol::Response::kStaleElementReference);
}
@@ -337,7 +338,7 @@
template <typename T>
util::CommandResult<T> ElementDriver::FindElementsInternal(
const protocol::SearchStrategy& strategy) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), element_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), element_task_runner_);
typedef util::CommandResult<T> CommandResult;
if (!element_) {
return CommandResult(protocol::Response::kStaleElementReference);
@@ -348,7 +349,7 @@
util::CommandResult<bool> ElementDriver::EqualsInternal(
const ElementDriver* other_element_driver) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), element_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), element_task_runner_);
typedef util::CommandResult<bool> CommandResult;
base::WeakPtr<dom::Element> other_element = other_element_driver->element_;
if (!element_ || !other_element) {
diff --git a/cobalt/webdriver/element_driver.h b/cobalt/webdriver/element_driver.h
index 5a29e5d..471f129 100644
--- a/cobalt/webdriver/element_driver.h
+++ b/cobalt/webdriver/element_driver.h
@@ -46,7 +46,7 @@
// ElementDriver could be considered a WebElement as described in the WebDriver
// spec.
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#webelement
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#webelement
// Commands that interact with a WebElement, such as:
// /session/:sessionId/element/:id/some_command
// will map to a method on this class.
diff --git a/cobalt/webdriver/execute_test.cc b/cobalt/webdriver/execute_test.cc
index 516430c..32d969b 100644
--- a/cobalt/webdriver/execute_test.cc
+++ b/cobalt/webdriver/execute_test.cc
@@ -18,6 +18,7 @@
#include "base/json/json_reader.h"
#include "base/run_loop.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/dom/document.h"
#include "cobalt/dom/testing/stub_window.h"
#include "cobalt/script/global_environment.h"
@@ -147,7 +148,7 @@
// Let the message loop run for 200ms to allow enough time for the async
// script to fire the callback.
base::RunLoop run_loop;
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, run_loop.QuitClosure(),
base::TimeDelta::FromMilliseconds(200));
run_loop.Run();
@@ -172,7 +173,7 @@
// Let the message loop run for 200ms to allow enough time for the async
// timeout to fire.
base::RunLoop run_loop;
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, run_loop.QuitClosure(),
base::TimeDelta::FromMilliseconds(200));
run_loop.Run();
diff --git a/cobalt/webdriver/protocol/button.h b/cobalt/webdriver/protocol/button.h
index 9866ea5..50db380 100644
--- a/cobalt/webdriver/protocol/button.h
+++ b/cobalt/webdriver/protocol/button.h
@@ -27,9 +27,9 @@
// Represents the JSON parameters passed to the click, buttondown, and buttonup
// WebDriver commands.
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidelementidclick
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidbuttondown
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidbuttonup
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidelementidclick
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidbuttondown
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidbuttonup
class Button {
public:
static std::unique_ptr<base::Value> ToValue(const Button& button);
diff --git a/cobalt/webdriver/protocol/capabilities.h b/cobalt/webdriver/protocol/capabilities.h
index 54ca1b0..87893de 100644
--- a/cobalt/webdriver/protocol/capabilities.h
+++ b/cobalt/webdriver/protocol/capabilities.h
@@ -45,7 +45,7 @@
private:
Capabilities() {}
// The capabilities listed here:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Capabilities-JSON-Object
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#capabilities-json-object
base::Optional<std::string> browser_name_;
base::Optional<std::string> version_;
diff --git a/cobalt/webdriver/protocol/cookie.h b/cobalt/webdriver/protocol/cookie.h
index 4d1357d..70f69cb 100644
--- a/cobalt/webdriver/protocol/cookie.h
+++ b/cobalt/webdriver/protocol/cookie.h
@@ -27,7 +27,7 @@
namespace webdriver {
namespace protocol {
-// https://w3c.github.io/webdriver/webdriver-spec.html#cookies
+// https://www.w3.org/TR/2015/WD-webdriver-20150808/#cookies
class Cookie {
public:
static std::unique_ptr<base::Value> ToValue(const Cookie& cookie);
diff --git a/cobalt/webdriver/protocol/element_id.h b/cobalt/webdriver/protocol/element_id.h
index 94b75c3..ec322a6 100644
--- a/cobalt/webdriver/protocol/element_id.h
+++ b/cobalt/webdriver/protocol/element_id.h
@@ -31,7 +31,7 @@
static const char kElementKey[];
// Convert the ElementId to a WebElement JSON object:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#WebElement-JSON-Object
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#webelement-json-object
static std::unique_ptr<base::Value> ToValue(const ElementId& element_id);
static base::Optional<ElementId> FromValue(const base::Value* value);
diff --git a/cobalt/webdriver/protocol/frame_id.h b/cobalt/webdriver/protocol/frame_id.h
index 4e2e7ce..3cdaeb7 100644
--- a/cobalt/webdriver/protocol/frame_id.h
+++ b/cobalt/webdriver/protocol/frame_id.h
@@ -22,7 +22,7 @@
namespace webdriver {
namespace protocol {
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidframe
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidframe
// Since Cobalt doesn't support multiple frames, the only valid value for this
// command is to request switching to the top-level browsing context, which
// is always active.
diff --git a/cobalt/webdriver/protocol/log_entry.cc b/cobalt/webdriver/protocol/log_entry.cc
index 297c084..d0682c1 100644
--- a/cobalt/webdriver/protocol/log_entry.cc
+++ b/cobalt/webdriver/protocol/log_entry.cc
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <memory>
-
#include "cobalt/webdriver/protocol/log_entry.h"
+#include <memory>
+
#include "base/logging.h"
namespace cobalt {
@@ -48,7 +48,7 @@
std::unique_ptr<base::DictionaryValue> log_entry_value(
new base::DictionaryValue());
// Format of the Log Entry object can be found here:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Log-Entry-JSON-Object
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#log-entry-json-object
// timestamp is in milliseconds since the Unix Epoch.
log_entry_value->SetInteger("timestamp",
log_entry.timestamp_.InMilliseconds());
diff --git a/cobalt/webdriver/protocol/log_entry.h b/cobalt/webdriver/protocol/log_entry.h
index 94d01bd..777b66f 100644
--- a/cobalt/webdriver/protocol/log_entry.h
+++ b/cobalt/webdriver/protocol/log_entry.h
@@ -26,7 +26,7 @@
namespace protocol {
// Log entry object:
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Log-Entry-JSON-Object
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#log-entry-json-object
class LogEntry {
public:
enum LogLevel {
diff --git a/cobalt/webdriver/protocol/moveto.h b/cobalt/webdriver/protocol/moveto.h
index c1be6dc..0819a1f 100644
--- a/cobalt/webdriver/protocol/moveto.h
+++ b/cobalt/webdriver/protocol/moveto.h
@@ -26,7 +26,7 @@
namespace protocol {
// Represents the JSON parameters passed to the moveto WebDriver command.
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidmoveto
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidmoveto
class Moveto {
public:
static std::unique_ptr<base::Value> ToValue(const Moveto& moveto);
diff --git a/cobalt/webdriver/protocol/response.h b/cobalt/webdriver/protocol/response.h
index 0244c14..1995ed2 100644
--- a/cobalt/webdriver/protocol/response.h
+++ b/cobalt/webdriver/protocol/response.h
@@ -29,7 +29,7 @@
class Response {
public:
// WebDriver Response Status Codes:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Response-Status-Codes
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#response-status-codes
enum StatusCode {
// The command executed successfully.
kSuccess = 0,
@@ -79,13 +79,13 @@
// Create a JSON object that will be used as the response body for a failed
// command:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Failed-Commands
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#failed-commands
// TODO: Add support for screenshot, stack trace, etc.
static std::unique_ptr<base::Value> CreateErrorResponse(
const std::string& message);
// Create a JSON object that will be used as the response body for a command:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Responses
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#responses
static std::unique_ptr<base::Value> CreateResponse(
const base::Optional<protocol::SessionId>& session_id,
StatusCode status_code,
diff --git a/cobalt/webdriver/protocol/script.h b/cobalt/webdriver/protocol/script.h
index 78256fc..e1ad598 100644
--- a/cobalt/webdriver/protocol/script.h
+++ b/cobalt/webdriver/protocol/script.h
@@ -27,8 +27,8 @@
// Represents the JSON parameters passed to the execute and execute_async
// WebDriver commands.
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidexecute
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidexecute-async
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidexecute
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidexecute_async
class Script {
public:
static base::Optional<Script> FromValue(const base::Value* script);
diff --git a/cobalt/webdriver/protocol/server_status.cc b/cobalt/webdriver/protocol/server_status.cc
index ed5d409..639074d 100644
--- a/cobalt/webdriver/protocol/server_status.cc
+++ b/cobalt/webdriver/protocol/server_status.cc
@@ -12,18 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <memory>
-
#include "cobalt/webdriver/protocol/server_status.h"
+#include <memory>
+#include <utility>
+
#include "cobalt/version.h"
+#include "starboard/common/system_property.h"
+
+using starboard::kSystemPropertyMaxLength;
namespace cobalt {
namespace webdriver {
namespace protocol {
ServerStatus::ServerStatus() {
- const size_t kSystemPropertyMaxLength = 1024;
char value[kSystemPropertyMaxLength];
bool result;
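
server_status.cc now pulls kSystemPropertyMaxLength from starboard/common/system_property.h instead of redefining it locally, which is also why //starboard/common is added to the webdriver deps in BUILD.gn above. A minimal sketch of the usual buffer pattern around that constant, assuming the Starboard API bool SbSystemGetProperty(SbSystemPropertyId, char*, int) (the wrapper function is illustrative only):

    #include <string>

    #include "starboard/common/system_property.h"
    #include "starboard/system.h"

    std::string GetPlatformName() {
      char value[starboard::kSystemPropertyMaxLength] = {};
      // Returns false if the property is unsupported or the buffer is too
      // small, in which case an empty string is returned instead.
      if (!SbSystemGetProperty(kSbSystemPropertyPlatformName, value,
                               starboard::kSystemPropertyMaxLength)) {
        return std::string();
      }
      return std::string(value);
    }
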
diff --git a/cobalt/webdriver/protocol/server_status.h b/cobalt/webdriver/protocol/server_status.h
index f100f41..708f61f 100644
--- a/cobalt/webdriver/protocol/server_status.h
+++ b/cobalt/webdriver/protocol/server_status.h
@@ -27,7 +27,7 @@
// Represents the JSON object that describes the WebDriver server's current
// status in response to the /status command:
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#status
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#status
class ServerStatus {
public:
ServerStatus();
diff --git a/cobalt/webdriver/protocol/session_id.h b/cobalt/webdriver/protocol/session_id.h
index 8fc7f45..b262940 100644
--- a/cobalt/webdriver/protocol/session_id.h
+++ b/cobalt/webdriver/protocol/session_id.h
@@ -25,7 +25,7 @@
namespace protocol {
// sessionId is mentioned in the spec describing WebDriver responses:
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Responses
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#responses
//
class SessionId {
public:
diff --git a/cobalt/webdriver/server.cc b/cobalt/webdriver/server.cc
index f7bff66..06093eb 100644
--- a/cobalt/webdriver/server.cc
+++ b/cobalt/webdriver/server.cc
@@ -16,12 +16,14 @@
#include <memory>
#include <string>
+#include <utility>
#include <vector>
#include "base/bind.h"
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
#include "base/strings/string_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "net/base/ip_endpoint.h"
#include "net/base/net_errors.h"
@@ -72,11 +74,11 @@
class ResponseHandlerImpl : public WebDriverServer::ResponseHandler {
public:
ResponseHandlerImpl(net::HttpServer* server, int connection_id)
- : task_runner_(base::MessageLoop::current()->task_runner()),
+ : task_runner_(base::ThreadTaskRunnerHandle::Get()),
server_(server),
connection_id_(connection_id) {}
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Responses
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#responses
void Success(std::unique_ptr<base::Value> value) override {
DCHECK(value);
std::string data;
@@ -94,7 +96,7 @@
// Failed commands map to a valid WebDriver command and contain the expected
// parameters, but otherwise failed to execute for some reason. This should
// send a 500 Internal Server Error.
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Error-Handling
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#error-handling
void FailedCommand(std::unique_ptr<base::Value> value) override {
DCHECK(value);
std::string data;
@@ -103,7 +105,7 @@
}
// A number of cases for invalid requests are explained here:
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Invalid-Requests
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#invalid-requests
// The response type should be text/plain and the message body is an error
// message
@@ -170,7 +172,7 @@
const std::string& content_type,
base::Optional<net::HttpServerResponseInfo> response_info =
base::Optional<net::HttpServerResponseInfo>()) {
- if (base::MessageLoop::current()->task_runner() == task_runner_) {
+ if (base::ThreadTaskRunnerHandle::Get() == task_runner_) {
SendToServer(server_, connection_id_, status, message, content_type,
response_info);
} else {
diff --git a/cobalt/webdriver/server.h b/cobalt/webdriver/server.h
index ffc9372..d93ae6d 100644
--- a/cobalt/webdriver/server.h
+++ b/cobalt/webdriver/server.h
@@ -54,18 +54,18 @@
class ResponseHandler {
public:
// Called after a successful WebDriver command.
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Responses
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#responses
virtual void Success(std::unique_ptr<base::Value>) = 0;
// |content_type| specifies the type of the data using HTTP mime types.
virtual void SuccessData(const std::string& content_type, const char* data,
int len) = 0;
// Called after a failed WebDriver command
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Failed-Commands
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#failed-commands
virtual void FailedCommand(std::unique_ptr<base::Value>) = 0;
// Called after an invalid request.
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Invalid-Requests
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#invalid-requests
virtual void UnknownCommand(const std::string& path) = 0;
virtual void UnimplementedCommand(const std::string& path) = 0;
virtual void VariableResourceNotFound(const std::string& variable_name) = 0;
diff --git a/cobalt/webdriver/testdata/simple_test.py b/cobalt/webdriver/testdata/simple_test.py
index bf532b0..5b5a9a1 100755
--- a/cobalt/webdriver/testdata/simple_test.py
+++ b/cobalt/webdriver/testdata/simple_test.py
@@ -21,7 +21,7 @@
import requests
# This is a simple script for Webdriver Wire Protocol communication.
-# https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
+# https://www.selenium.dev/documentation/legacy/json_wire_protocol
WEBDRIVER_HOST = 'http://localhost:4444'
@@ -30,7 +30,7 @@
DELETE = 'DELETE'
# WebDriver Response Status Codes:
-# https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#Response-Status-Codes
+# https://www.selenium.dev/documentation/legacy/json_wire_protocol/#response-status-codes
RESPONSE_STATUS_CODES = {
0: 'Success',
@@ -72,24 +72,29 @@
Returns:
The dictionary returned by the WebDriver server
"""
- url = '%s/%s' % (WEBDRIVER_HOST, path)
+ url = f'{WEBDRIVER_HOST}/{path}'
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
if request_type == GET:
- request = requests.get(url, data=json.dumps(parameters), headers=headers)
+ request = requests.get(
+ url, data=json.dumps(parameters), headers=headers, timeout=10)
if request_type == POST:
- request = requests.post(url, data=json.dumps(parameters), headers=headers)
+ request = requests.post(
+ url, data=json.dumps(parameters), headers=headers, timeout=10)
if request_type == DELETE:
- request = requests.delete(url, data=json.dumps(parameters), headers=headers)
+ request = requests.delete(
+ url, data=json.dumps(parameters), headers=headers, timeout=10)
result = request.text if request.headers[
'content-type'] == 'text/plain' else request.json()
if request.status_code == 200:
return result
else:
- print('*** Error %d %s: \"%s\"' %
- (request.status_code, RESPONSE_STATUS_CODES[result['status']]
- if isinstance(result, dict) else 'unknown',
- result['value']['message'] if isinstance(result, dict) else result))
- print('*** Error %d: %s' % (request.status_code, result))
+ print(
+ f'*** Error {request.status_code} '
+ f"{RESPONSE_STATUS_CODES[result['status']] if isinstance(result, dict) else 'unknown'}" # pylint: disable=line-too-long
+ ': \"'
+ f"{result['value']['message'] if isinstance(result, dict) else result}"
+ '\"')
+ print(f'*** Error {request.status_code}: {result}')
return None
@@ -106,9 +111,8 @@
The dictionary returned by the WebDriver server
"""
if path:
- return Request(request_type, 'session/%s/%s' % (session_id, path),
- parameters)
- return Request(request_type, 'session/%s' % (session_id), parameters)
+ return Request(request_type, f'session/{session_id}/{path}', parameters)
+ return Request(request_type, f'session/{session_id}', parameters)
def ElementRequest(session_id,
@@ -117,8 +121,7 @@
path=None,
parameters=None):
return SessionRequest(session_id, request_type,
- 'element/%s/%s' % (element_id[u'ELEMENT'], path),
- parameters)
+ f"element/{element_id['ELEMENT']}/{path}", parameters)
def GetSessionID():
@@ -129,7 +132,7 @@
"""
request = Request(POST, 'session', {'desiredCapabilities': {}})
if request:
- session_id = request[u'sessionId']
+ session_id = request['sessionId']
else:
# If creating a new session id fails, use an already existing session.
request = Request(GET, 'sessions')
@@ -159,7 +162,7 @@
"""
request = SessionRequest(session_id, GET, 'screenshot')
if request:
- with open(filename, 'w') as f:
+ with open(filename, 'w', encoding='utf-8') as f:
f.write(binascii.a2b_base64(request['value']))
f.close()
@@ -173,7 +176,7 @@
"""
request = ElementRequest(session_id, element_id, GET, 'screenshot')
if request:
- with open(filename, 'w') as f:
+ with open(filename, 'w', encoding='utf-8') as f:
f.write(binascii.a2b_base64(request['value']))
f.close()
@@ -184,22 +187,22 @@
def Moveto(session_id, element, xoffset, yoffset):
return SessionRequest(session_id, POST, 'moveto', {
- u'element': element,
- u'xoffset': xoffset,
- u'yoffset': yoffset
+ 'element': element,
+ 'xoffset': xoffset,
+ 'yoffset': yoffset
})
def Click(session_id, button):
- return SessionRequest(session_id, POST, 'click', {u'button': button})
+ return SessionRequest(session_id, POST, 'click', {'button': button})
def Buttondown(session_id, button):
- return SessionRequest(session_id, POST, 'buttondown', {u'button': button})
+ return SessionRequest(session_id, POST, 'buttondown', {'button': button})
def Buttonup(session_id, button):
- return SessionRequest(session_id, POST, 'buttonup', {u'button': button})
+ return SessionRequest(session_id, POST, 'buttonup', {'button': button})
def ElementName(session_id, element_id):
@@ -212,17 +215,17 @@
def ElementClick(session_id, element_id, button):
return ElementRequest(session_id, element_id, POST, 'click',
- {u'button': button})
+ {'button': button})
def ElementKeys(session_id, element_id, keys):
- return ElementRequest(session_id, element_id, POST, 'value', {u'value': keys})
+ return ElementRequest(session_id, element_id, POST, 'value', {'value': keys})
def ElementFind(session_id, using, value):
result = SessionRequest(session_id, POST, 'element', {
- u'using': using,
- u'value': value
+ 'using': using,
+ 'value': value
})
return None if result is None else result['value']
@@ -233,17 +236,17 @@
session_id = GetSessionID()
try:
active_element = GetActiveElement(session_id)
- print('active_element : %s' % active_element)
+ print(f'active_element : {active_element}')
for xoffset in range(0, 1900, 20):
- print('Moveto: %s' % Moveto(session_id, active_element, xoffset, 200))
+ print(f'Moveto: {Moveto(session_id, active_element, xoffset, 200)}')
time.sleep(0.05)
selected_element = ElementFind(session_id, 'class name',
'ytlr-tile-renderer--focused')
- print('selected_element : %s' % selected_element)
+ print(f'selected_element : {selected_element}')
- print('ElementClick: %s' % ElementClick(session_id, selected_element, 0))
+ print(f'ElementClick: {ElementClick(session_id, selected_element, 0)}')
except KeyboardInterrupt:
print('Bye')
@@ -258,17 +261,16 @@
try:
selected_element = ElementFind(session_id, 'class name',
'ytlr-tile-renderer--focused')
- print('Selected List element : %s' % selected_element)
+ print(f'Selected List element : {selected_element}')
# Write screenshots for the selected element, until interrupted.
while True:
selected_element = ElementFind(session_id, 'class name',
'ytlr-tile-renderer--focused')
- print('Selected List element : %s' % selected_element)
+ print(f'Selected List element : {selected_element}')
if selected_element is not None:
- print('GetElementScreenShot: %s' % GetElementScreenShot(
- session_id, selected_element,
- 'element-' + selected_element['ELEMENT'] + '.png'))
+ print('GetElementScreenShot: '
+ f"{GetElementScreenShot(session_id, selected_element, 'element-' + selected_element['ELEMENT'] + '.png')}") # pylint: disable=line-too-long
except KeyboardInterrupt:
print('Bye')
@@ -282,21 +284,21 @@
session_id = GetSessionID()
try:
initial_active_element = GetActiveElement(session_id)
- print('initial active_element : %s' % initial_active_element)
+ print(f'initial active_element : {initial_active_element}')
initial_selected_element = ElementFind(session_id, 'class name',
'ytlr-tile-renderer--focused')
- print('Selected List element : %s' % initial_selected_element)
+ print(f'Selected List element : {initial_selected_element}')
while True:
active_element = GetActiveElement(session_id)
- print('active_element : %s' % active_element)
+ print(f'active_element : {active_element}')
if initial_active_element != active_element:
break
selected_element = ElementFind(session_id, 'class name',
'ytlr-tile-renderer--focused')
- print('Selected List element : %s' % selected_element)
+ print(f'Selected List element : {selected_element}')
if initial_selected_element != selected_element:
break
diff --git a/cobalt/webdriver/util/call_on_message_loop.h b/cobalt/webdriver/util/call_on_message_loop.h
index 90957a0..2d89963 100644
--- a/cobalt/webdriver/util/call_on_message_loop.h
+++ b/cobalt/webdriver/util/call_on_message_loop.h
@@ -22,6 +22,7 @@
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_task_runner_handle.h"
namespace cobalt {
namespace webdriver {
@@ -37,7 +38,7 @@
: completed_event_(base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED),
success_(false) {
- DCHECK_NE(base::MessageLoop::current()->task_runner(), task_runner);
+ DCHECK_NE(base::ThreadTaskRunnerHandle::Get(), task_runner);
std::unique_ptr<DeletionSignaler> dt(
new DeletionSignaler(&completed_event_));
// Note that while base::MessageLoopProxy::PostTask returns false
diff --git a/cobalt/webdriver/web_driver_module.cc b/cobalt/webdriver/web_driver_module.cc
index 3a4d360..8b1edfc 100644
--- a/cobalt/webdriver/web_driver_module.cc
+++ b/cobalt/webdriver/web_driver_module.cc
@@ -294,11 +294,18 @@
base::StringPrintf("/session/%s/execute", kSessionIdVariable),
current_window_command_factory->GetCommandHandler(
base::Bind(&WindowDriver::Execute)));
+ // https://www.w3.org/TR/2015/WD-webdriver-20150808/#execute-async-script
webdriver_dispatcher_->RegisterCommand(
WebDriverServer::kPost,
base::StringPrintf("/session/%s/execute_async", kSessionIdVariable),
current_window_command_factory->GetCommandHandler(
base::Bind(&WindowDriver::ExecuteAsync)));
+ // https://www.w3.org/TR/2015/WD-webdriver-20150827/#execute-async-script
+ webdriver_dispatcher_->RegisterCommand(
+ WebDriverServer::kPost,
+ base::StringPrintf("/session/%s/execute/async", kSessionIdVariable),
+ current_window_command_factory->GetCommandHandler(
+ base::Bind(&WindowDriver::ExecuteAsync)));
webdriver_dispatcher_->RegisterCommand(
WebDriverServer::kPost,
base::StringPrintf("/session/%s/element", kSessionIdVariable),
@@ -514,7 +521,7 @@
return NULL;
}
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#status
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#status
void WebDriverModule::GetServerStatus(
const base::Value* parameters,
const WebDriverDispatcher::PathVariableMap* path_variables,
@@ -524,7 +531,7 @@
protocol::ServerStatus::ToValue(status_));
}
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessions
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessions
void WebDriverModule::GetActiveSessions(
const base::Value* parameters,
const WebDriverDispatcher::PathVariableMap* path_variables,
@@ -538,7 +545,7 @@
util::internal::ToValue(sessions));
}
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#get-sessionsessionid
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionid
void WebDriverModule::CreateSession(
const base::Value* parameters,
const WebDriverDispatcher::PathVariableMap* path_variables,
@@ -564,7 +571,7 @@
result_handler.get());
}
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#delete-sessionsessionid
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessions
void WebDriverModule::DeleteSession(
const base::Value* parameters,
const WebDriverDispatcher::PathVariableMap* path_variables,
@@ -627,7 +634,7 @@
}
}
-// https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidscreenshot
+// https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidscreenshot
void WebDriverModule::RequestScreenshot(
const base::Value* parameters,
const WebDriverDispatcher::PathVariableMap* path_variables,
diff --git a/cobalt/webdriver/window_driver.cc b/cobalt/webdriver/window_driver.cc
index da4efbd..43f213a 100644
--- a/cobalt/webdriver/window_driver.cc
+++ b/cobalt/webdriver/window_driver.cc
@@ -18,6 +18,7 @@
#include <utility>
#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/dom/document.h"
#include "cobalt/dom/dom_rect.h"
#include "cobalt/dom/location.h"
@@ -164,7 +165,7 @@
&result);
return success ? result : NULL;
}
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
ElementDriverMap::iterator it = element_drivers_.find(element_id.id());
if (it != element_drivers_.end()) {
return it->second;
@@ -398,7 +399,7 @@
protocol::ElementId WindowDriver::ElementToId(
const scoped_refptr<dom::Element>& element) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
for (auto i : element_drivers_) {
// Note: The element_task_runner_ is the same as the window_task_runner_.
auto weak_element = i.second->GetWeakElement();
@@ -412,14 +413,14 @@
scoped_refptr<dom::Element> WindowDriver::IdToElement(
const protocol::ElementId& id) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
return base::WrapRefCounted(
GetElementDriver(protocol::ElementId(id))->GetWeakElement());
}
protocol::ElementId WindowDriver::CreateNewElementDriver(
const base::WeakPtr<dom::Element>& weak_element) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
protocol::ElementId element_id(
base::StringPrintf("element-%d", next_element_id_++));
@@ -442,7 +443,7 @@
template <typename T>
util::CommandResult<T> WindowDriver::FindElementsInternal(
const protocol::SearchStrategy& strategy) {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
typedef util::CommandResult<T> CommandResult;
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
@@ -456,7 +457,7 @@
base::Optional<base::TimeDelta> async_timeout,
ScriptExecutorResult::ResultHandler* async_handler) {
typedef util::CommandResult<protocol::ScriptResult> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
@@ -494,7 +495,7 @@
util::CommandResult<void> WindowDriver::SendKeysInternal(
std::unique_ptr<Keyboard::KeyboardEventVector> events) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
@@ -508,7 +509,7 @@
util::CommandResult<void> WindowDriver::NavigateInternal(const GURL& url) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
@@ -519,7 +520,7 @@
util::CommandResult<void> WindowDriver::AddCookieInternal(
const protocol::Cookie& cookie) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
@@ -568,7 +569,7 @@
util::CommandResult<void> WindowDriver::MouseMoveToInternal(
const protocol::Moveto& moveto) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
@@ -577,7 +578,7 @@
// specified, the move is relative to the current mouse cursor. If an element
// is provided but no offset, the mouse will be moved to the center of the
// element.
- // https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol#sessionsessionidmoveto
+ // https://www.selenium.dev/documentation/legacy/json_wire_protocol/#sessionsessionidmoveto
float x = 0;
float y = 0;
scoped_refptr<dom::Element> element;
@@ -666,7 +667,7 @@
util::CommandResult<void> WindowDriver::MouseButtonDownInternal(
const protocol::Button& button) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
@@ -679,7 +680,7 @@
util::CommandResult<void> WindowDriver::MouseButtonUpInternal(
const protocol::Button& button) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
@@ -692,7 +693,7 @@
util::CommandResult<void> WindowDriver::SendClickInternal(
const protocol::Button& button) {
typedef util::CommandResult<void> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
@@ -706,7 +707,7 @@
util::CommandResult<protocol::ElementId>
WindowDriver::GetActiveElementInternal() {
typedef util::CommandResult<protocol::ElementId> CommandResult;
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
if (!window_) {
return CommandResult(protocol::Response::kNoSuchWindow);
}
diff --git a/cobalt/webdriver/window_driver.h b/cobalt/webdriver/window_driver.h
index d72f8c9..03e0bf8 100644
--- a/cobalt/webdriver/window_driver.h
+++ b/cobalt/webdriver/window_driver.h
@@ -29,6 +29,7 @@
#include "base/stl_util.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/dom/keyboard_event.h"
#include "cobalt/dom/pointer_event.h"
#include "cobalt/dom/pointer_event_init.h"
@@ -117,7 +118,7 @@
const protocol::ElementId& id) override;
dom::Window* GetWeak() {
- DCHECK_EQ(base::MessageLoop::current()->task_runner(), window_task_runner_);
+ DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(), window_task_runner_);
return window_.get();
}
diff --git a/cobalt/websocket/web_socket_impl.cc b/cobalt/websocket/web_socket_impl.cc
index 39b54d2..1d7d262 100644
--- a/cobalt/websocket/web_socket_impl.cc
+++ b/cobalt/websocket/web_socket_impl.cc
@@ -17,6 +17,7 @@
#include <algorithm>
#include <cstdint>
#include <memory>
+#include <utility>
#include "base/basictypes.h"
#include "base/bind_helpers.h"
@@ -24,6 +25,7 @@
#include "base/memory/ref_counted.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/websocket/web_socket.h"
#include "net/http/http_util.h"
@@ -36,11 +38,11 @@
WebSocket *delegate)
: network_module_(network_module), delegate_(delegate) {
DCHECK(base::MessageLoop::current());
- owner_task_runner_ = base::MessageLoop::current()->task_runner();
+ owner_task_runner_ = base::ThreadTaskRunnerHandle::Get();
}
void WebSocketImpl::ResetWebSocketEventDelegate() {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
delegate_ = NULL;
delegate_task_runner_->PostTask(
@@ -56,7 +58,7 @@
return;
}
DCHECK(network_module_->url_request_context_getter());
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
origin_ = origin;
DLOG(INFO) << "Connecting to websocket at " << url.spec();
@@ -87,7 +89,7 @@
void WebSocketImpl::DoConnect(
scoped_refptr<cobalt::network::URLRequestContextGetter> context,
const GURL &url, base::WaitableEvent *channel_created_event) {
- DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+ DCHECK(delegate_task_runner_->RunsTasksInCurrentSequence());
DCHECK(url.is_valid());
DCHECK(channel_created_event);
DCHECK(context->GetURLRequestContext());
@@ -120,7 +122,7 @@
if (!websocket_channel_) {
return;
}
- DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+ DCHECK(delegate_task_runner_->RunsTasksInCurrentSequence());
auto channel_state = websocket_channel_->StartClosingHandshake(
close_info.code, close_info.reason);
@@ -131,7 +133,7 @@
}
void WebSocketImpl::ResetChannel() {
- DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+ DCHECK(delegate_task_runner_->RunsTasksInCurrentSequence());
websocket_channel_.reset();
}
@@ -169,7 +171,7 @@
void WebSocketImpl::OnWebSocketConnected(
const std::string &selected_subprotocol) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (delegate_) {
delegate_->OnConnected(selected_subprotocol);
@@ -178,7 +180,7 @@
void WebSocketImpl::OnWebSocketDisconnected(bool was_clean, uint16 code,
const std::string &reason) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (delegate_) {
delegate_->OnDisconnected(was_clean, code, reason);
}
@@ -186,14 +188,14 @@
void WebSocketImpl::OnWebSocketReceivedData(
bool is_text_frame, scoped_refptr<net::IOBufferWithSize> data) {
- if (!owner_task_runner_->BelongsToCurrentThread()) {
+ if (!owner_task_runner_->RunsTasksInCurrentSequence()) {
owner_task_runner_->PostTask(
FROM_HERE, base::Bind(&WebSocketImpl::OnWebSocketReceivedData, this,
is_text_frame, data));
return;
}
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (delegate_) {
delegate_->OnReceivedData(is_text_frame, data);
}
@@ -201,7 +203,7 @@
void WebSocketImpl::OnClose(bool was_clean, int error_code,
const std::string &close_reason) {
- DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+ DCHECK(delegate_task_runner_->RunsTasksInCurrentSequence());
std::uint16_t close_code = static_cast<std::uint16_t>(error_code);
@@ -242,7 +244,7 @@
scoped_refptr<net::IOBuffer> io_buffer(new net::IOBuffer(length));
memcpy(io_buffer->data(), data, length);
- if (delegate_task_runner_->BelongsToCurrentThread()) {
+ if (delegate_task_runner_->RunsTasksInCurrentSequence()) {
SendOnDelegateThread(op_code, std::move(io_buffer), length);
} else {
base::Closure do_send_closure(
@@ -258,7 +260,7 @@
void WebSocketImpl::SendOnDelegateThread(
const net::WebSocketFrameHeader::OpCode op_code,
scoped_refptr<net::IOBuffer> io_buffer, std::size_t length) {
- DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+ DCHECK(delegate_task_runner_->RunsTasksInCurrentSequence());
if (!websocket_channel_) {
DLOG(WARNING) << "Attempt to send over a closed channel.";
@@ -270,7 +272,7 @@
}
void WebSocketImpl::ProcessSendQueue() {
- DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+ DCHECK(delegate_task_runner_->RunsTasksInCurrentSequence());
while (current_quota_ > 0 && !send_queue_.empty()) {
SendQueueMessage message = send_queue_.front();
size_t current_message_length = message.length - sent_size_of_top_message_;
@@ -281,8 +283,8 @@
scoped_refptr<net::IOBuffer> new_io_buffer(
new net::IOBuffer(static_cast<size_t>(current_quota_)));
memcpy(new_io_buffer->data(),
- message.io_buffer->data() + sent_size_of_top_message_,
- current_quota_);
+ message.io_buffer->data() + sent_size_of_top_message_,
+ current_quota_);
sent_size_of_top_message_ += current_quota_;
message.io_buffer = new_io_buffer;
current_message_length = current_quota_;
diff --git a/cobalt/websocket/web_socket_impl.h b/cobalt/websocket/web_socket_impl.h
index 6c65620..f2842a0 100644
--- a/cobalt/websocket/web_socket_impl.h
+++ b/cobalt/websocket/web_socket_impl.h
@@ -23,8 +23,8 @@
#include "base/compiler_specific.h"
#include "base/memory/ref_counted.h"
#include "base/optional.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_checker.h"
+#include "base/sequence_checker.h"
+#include "base/sequenced_task_runner.h"
#include "cobalt/network/network_module.h"
#include "cobalt/websocket/buffered_amount_tracker.h"
#include "cobalt/websocket/cobalt_web_socket_event_handler.h"
@@ -123,7 +123,7 @@
void ResetChannel();
- THREAD_CHECKER(thread_checker_);
+ SEQUENCE_CHECKER(sequence_checker_);
std::vector<std::string> desired_sub_protocols_;
network::NetworkModule* network_module_;
@@ -143,8 +143,8 @@
std::queue<SendQueueMessage> send_queue_;
size_t sent_size_of_top_message_ = 0;
- scoped_refptr<base::SingleThreadTaskRunner> delegate_task_runner_;
- scoped_refptr<base::SingleThreadTaskRunner> owner_task_runner_;
+ scoped_refptr<base::SequencedTaskRunner> delegate_task_runner_;
+ scoped_refptr<base::SequencedTaskRunner> owner_task_runner_;
~WebSocketImpl();
friend class base::RefCountedThreadSafe<WebSocketImpl>;
diff --git a/cobalt/websocket/web_socket_impl_test.cc b/cobalt/websocket/web_socket_impl_test.cc
index d7ddb7b..a750436 100644
--- a/cobalt/websocket/web_socket_impl_test.cc
+++ b/cobalt/websocket/web_socket_impl_test.cc
@@ -107,7 +107,7 @@
}
std::unique_ptr<web::testing::StubWebContext> web_context_;
- scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
+ scoped_refptr<base::SequencedTaskRunner> network_task_runner_;
scoped_refptr<WebSocketImpl> websocket_impl_;
MockWebSocketChannel* mock_channel_;
StrictMock<MockExceptionState> exception_state_;
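
The websocket hunks above migrate from THREAD_CHECKER plus SingleThreadTaskRunner to SEQUENCE_CHECKER plus SequencedTaskRunner, so the class asserts that its methods run on the owning sequence rather than on one specific thread. A minimal sketch of the sequence-checker pattern, assuming base/sequence_checker.h (the class is illustrative only):

    #include "base/sequence_checker.h"

    class Channel {
     public:
      void Send() {
        // DCHECKs (in DCHECK-enabled builds) that this call happens on the
        // same sequence the checker was first bound to.
        DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
        // ... queue or send data ...
      }

     private:
      SEQUENCE_CHECKER(sequence_checker_);
    };
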
diff --git a/cobalt/worker/clients.cc b/cobalt/worker/clients.cc
index c7c7f1a..c78e12a 100644
--- a/cobalt/worker/clients.cc
+++ b/cobalt/worker/clients.cc
@@ -22,6 +22,7 @@
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/script/environment_settings.h"
@@ -139,7 +140,7 @@
service_worker->state() != kServiceWorkerStateActivating);
// Perform the rest of the steps in a task, because the promise has to be
// returned before we can safely reject it.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(
[](std::unique_ptr<script::ValuePromiseVoid::Reference>
diff --git a/cobalt/worker/dedicated_worker.cc b/cobalt/worker/dedicated_worker.cc
index 43f2b0d..624dbec 100644
--- a/cobalt/worker/dedicated_worker.cc
+++ b/cobalt/worker/dedicated_worker.cc
@@ -94,6 +94,7 @@
// 1. Run a worker given worker, worker URL, outside settings, outside
// port, and options.
options.outside_context = environment_settings()->context();
+ options.outside_event_target = this;
options.outside_port = outside_port_.get();
options.options = worker_options_;
options.web_options.service_worker_jobs =
diff --git a/cobalt/worker/fetch_event.cc b/cobalt/worker/fetch_event.cc
index 99497fb..57c9227 100644
--- a/cobalt/worker/fetch_event.cc
+++ b/cobalt/worker/fetch_event.cc
@@ -17,6 +17,7 @@
#include <memory>
#include <utility>
+#include "base/threading/thread_task_runner_handle.h"
#include "cobalt/script/v8c/conversion_helpers.h"
#include "cobalt/script/v8c/v8c_value_handle.h"
#include "cobalt/web/cache_utils.h"
@@ -29,13 +30,13 @@
const std::string& type,
const FetchEventInit& event_init_dict)
: FetchEvent(environment_settings, base::Token(type), event_init_dict,
- base::MessageLoop::current()->task_runner(),
- RespondWithCallback(), ReportLoadTimingInfo()) {}
+ base::ThreadTaskRunnerHandle::Get(), RespondWithCallback(),
+ ReportLoadTimingInfo()) {}
FetchEvent::FetchEvent(
script::EnvironmentSettings* environment_settings, base::Token type,
const FetchEventInit& event_init_dict,
- scoped_refptr<base::SingleThreadTaskRunner> callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner,
RespondWithCallback respond_with_callback,
ReportLoadTimingInfo report_load_timing_info)
: ExtendableEvent(environment_settings, type, event_init_dict),
@@ -89,8 +90,7 @@
->PostTask(
FROM_HERE,
base::BindOnce(
- [](scoped_refptr<base::SingleThreadTaskRunner>
- callback_task_runner,
+ [](scoped_refptr<base::SequencedTaskRunner> callback_task_runner,
RespondWithCallback respond_with_callback, std::string body,
base::MessageLoop* loop, base::OnceClosure callback) {
callback_task_runner->PostTask(
diff --git a/cobalt/worker/fetch_event.h b/cobalt/worker/fetch_event.h
index 2b129c0..fe42571 100644
--- a/cobalt/worker/fetch_event.h
+++ b/cobalt/worker/fetch_event.h
@@ -41,7 +41,7 @@
const FetchEventInit& event_init_dict);
FetchEvent(script::EnvironmentSettings*, base::Token type,
const FetchEventInit& event_init_dict,
- scoped_refptr<base::SingleThreadTaskRunner> callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner,
RespondWithCallback respond_with_callback,
ReportLoadTimingInfo report_load_timing_info);
~FetchEvent() override = default;
@@ -67,7 +67,7 @@
void RespondWithDone();
script::EnvironmentSettings* environment_settings_;
- scoped_refptr<base::SingleThreadTaskRunner> callback_task_runner_;
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner_;
RespondWithCallback respond_with_callback_;
ReportLoadTimingInfo report_load_timing_info_;
std::unique_ptr<script::ValueHandleHolder::Reference> request_;
diff --git a/cobalt/worker/service_worker_container.cc b/cobalt/worker/service_worker_container.cc
index 9350212..b392f5c 100644
--- a/cobalt/worker/service_worker_container.cc
+++ b/cobalt/worker/service_worker_container.cc
@@ -21,6 +21,7 @@
#include "base/message_loop/message_loop.h"
#include "base/optional.h"
#include "base/task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/dom/dom_settings.h"
#include "cobalt/script/promise.h"
@@ -152,7 +153,7 @@
}
// 6. Invoke Start Register with scopeURL, scriptURL, p, client, client’s
// creation URL, options["type"], and options["updateViaCache"].
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&ServiceWorkerJobs::StartRegister,
base::Unretained(client->service_worker_jobs()), scope_url,
@@ -182,7 +183,7 @@
new script::ValuePromiseWrappable::Reference(
environment_settings()->context()->GetWindowOrWorkerGlobalScope(),
promise));
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(&ServiceWorkerContainer::GetRegistrationTask,
base::Unretained(this), url,
std::move(promise_reference)));
@@ -255,7 +256,7 @@
promise_reference(new script::ValuePromiseSequenceWrappable::Reference(
environment_settings()->context()->GetWindowOrWorkerGlobalScope(),
promise));
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&ServiceWorkerContainer::GetRegistrationsTask,
base::Unretained(this), std::move(promise_reference)));
diff --git a/cobalt/worker/service_worker_global_scope.cc b/cobalt/worker/service_worker_global_scope.cc
index 1598b81..06b7dae 100644
--- a/cobalt/worker/service_worker_global_scope.cc
+++ b/cobalt/worker/service_worker_global_scope.cc
@@ -20,7 +20,7 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "base/single_thread_task_runner.h"
+#include "base/sequenced_task_runner.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/script/environment_settings.h"
#include "cobalt/script/exception_state.h"
@@ -196,7 +196,7 @@
void ServiceWorkerGlobalScope::StartFetch(
const GURL& url, bool main_resource,
const net::HttpRequestHeaders& request_headers,
- scoped_refptr<base::SingleThreadTaskRunner> callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner,
base::OnceCallback<void(std::unique_ptr<std::string>)> callback,
base::OnceCallback<void(const net::LoadTimingInfo&)>
report_load_timing_info,
diff --git a/cobalt/worker/service_worker_global_scope.h b/cobalt/worker/service_worker_global_scope.h
index a32ba66..5da2e11 100644
--- a/cobalt/worker/service_worker_global_scope.h
+++ b/cobalt/worker/service_worker_global_scope.h
@@ -77,7 +77,7 @@
void StartFetch(
const GURL& url, bool main_resource,
const net::HttpRequestHeaders& request_headers,
- scoped_refptr<base::SingleThreadTaskRunner> callback_task_runner,
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner,
base::OnceCallback<void(std::unique_ptr<std::string>)> callback,
base::OnceCallback<void(const net::LoadTimingInfo&)>
report_load_timing_info,
diff --git a/cobalt/worker/service_worker_jobs.cc b/cobalt/worker/service_worker_jobs.cc
index 6b332d0..8288cf9 100644
--- a/cobalt/worker/service_worker_jobs.cc
+++ b/cobalt/worker/service_worker_jobs.cc
@@ -31,6 +31,7 @@
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/base/tokens.h"
@@ -1000,7 +1001,7 @@
// Post a task for the remaining steps, to let tasks posted by
// RunServiceWorker, such as for registering the web context, execute first.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&ServiceWorkerJobs::UpdateOnRunServiceWorker,
base::Unretained(this), std::move(state),
std::move(worker), run_result_is_success));
diff --git a/cobalt/worker/service_worker_object.cc b/cobalt/worker/service_worker_object.cc
index b16feb6..9fcbbd1 100644
--- a/cobalt/worker/service_worker_object.cc
+++ b/cobalt/worker/service_worker_object.cc
@@ -233,8 +233,8 @@
if (!service_worker_global_scope->csp_delegate()->OnReceiveHeaders(
csp_headers)) {
// https://www.w3.org/TR/service-workers/#content-security-policy
- DLOG(WARNING) << "Warning: No Content Security Header received for the "
- "service worker.";
+ LOG(WARNING) << "Warning: No Content Security Header received for the "
+ "service worker.";
}
web_context_->SetupFinished();
// 8.11. If serviceWorker is an active worker, and there are any tasks queued
@@ -298,8 +298,8 @@
// set of event types to handle remains an empty set. The user agents are
// encouraged to show a warning that the event listeners must be added on
// the very first evaluation of the worker script.
- DLOG(WARNING) << "ServiceWorkerGlobalScope's event listeners must be "
- "added on the first evaluation of the worker script.";
+ LOG(WARNING) << "ServiceWorkerGlobalScope's event listeners must be "
+ "added on the first evaluation of the worker script.";
}
event_types.clear();
}
diff --git a/cobalt/worker/service_worker_persistent_settings.cc b/cobalt/worker/service_worker_persistent_settings.cc
index ba47191..41a689d 100644
--- a/cobalt/worker/service_worker_persistent_settings.cc
+++ b/cobalt/worker/service_worker_persistent_settings.cc
@@ -90,9 +90,6 @@
ServiceWorkerConsts::kSettingsJson));
persistent_settings_->ValidatePersistentSettings();
DCHECK(persistent_settings_);
-
- cache_.reset(cobalt::cache::Cache::GetInstance());
- DCHECK(cache_);
}
void ServiceWorkerPersistentSettings::ReadServiceWorkerRegistrationMapSettings(
@@ -243,9 +240,10 @@
if (script_url_value.is_string()) {
auto script_url_string = script_url_value.GetString();
auto script_url = GURL(script_url_string);
- std::unique_ptr<std::vector<uint8_t>> data = cache_->Retrieve(
- disk_cache::ResourceType::kServiceWorkerScript,
- web::cache_utils::GetKey(key_string + script_url_string));
+ std::unique_ptr<std::vector<uint8_t>> data =
+ cobalt::cache::Cache::GetInstance()->Retrieve(
+ disk_cache::ResourceType::kServiceWorkerScript,
+ web::cache_utils::GetKey(key_string + script_url_string));
if (data == nullptr) {
return false;
}
@@ -382,7 +380,7 @@
// Use Cache::Store to persist the script resource.
std::string resource = *(script_resource.second.content.get());
std::vector<uint8_t> data(resource.begin(), resource.end());
- cache_->Store(
+ cobalt::cache::Cache::GetInstance()->Store(
disk_cache::ResourceType::kServiceWorkerScript,
web::cache_utils::GetKey(registration_key_string + script_url_string),
data,
@@ -448,7 +446,7 @@
auto script_url_value = std::move(script_urls_list[i]);
if (script_url_value.is_string()) {
auto script_url_string = script_url_value.GetString();
- cache_->Delete(
+ cobalt::cache::Cache::GetInstance()->Delete(
disk_cache::ResourceType::kServiceWorkerScript,
web::cache_utils::GetKey(key_string + script_url_string));
}
diff --git a/cobalt/worker/service_worker_persistent_settings.h b/cobalt/worker/service_worker_persistent_settings.h
index efda66f..1d4fd2e 100644
--- a/cobalt/worker/service_worker_persistent_settings.h
+++ b/cobalt/worker/service_worker_persistent_settings.h
@@ -94,8 +94,6 @@
persistent_settings_;
std::set<std::string> key_set_;
-
- std::unique_ptr<cobalt::cache::Cache> cache_;
};
} // namespace worker
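The persistent-settings change above drops the std::unique_ptr<cobalt::cache::Cache> member, which had wrapped what appears to be a process-wide singleton (given the GetInstance() accessor) and therefore implied an ownership the class does not have, and instead calls cobalt::cache::Cache::GetInstance() at each use. A rough sketch of the two shapes, using a hypothetical Singleton type rather than the real cache API:

    #include <memory>

    // Hypothetical process-wide singleton standing in for cobalt::cache::Cache.
    class Singleton {
     public:
      static Singleton* GetInstance() {
        static Singleton instance;  // Owned by the process, never deleted here.
        return &instance;
      }
      void Store(int key, int value) {}
    };

    class BeforeShape {
      // Risky: wrapping the singleton pointer in unique_ptr means this object's
      // destructor would delete state shared with the rest of the process.
      std::unique_ptr<Singleton> cache_{Singleton::GetInstance()};
    };

    class AfterShape {
     public:
      void Save() { Singleton::GetInstance()->Store(0, 0); }  // Look up per use.
    };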
diff --git a/cobalt/worker/service_worker_registration.cc b/cobalt/worker/service_worker_registration.cc
index 4e30763..adba128 100644
--- a/cobalt/worker/service_worker_registration.cc
+++ b/cobalt/worker/service_worker_registration.cc
@@ -19,6 +19,7 @@
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cobalt/script/environment_settings.h"
#include "cobalt/web/context.h"
@@ -55,7 +56,7 @@
promise));
// Perform the rest of the steps in a task, because the promise has to be
// returned before we can safely reject or resolve it.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&ServiceWorkerRegistration::UpdateTask,
base::Unretained(this), std::move(promise_reference)));
@@ -142,7 +143,7 @@
// Perform the rest of the steps in a task, so that unregister doesn't race
// past any previously submitted update requests.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(
[](worker::ServiceWorkerJobs* jobs, const url::Origin& storage_key,
diff --git a/cobalt/worker/worker.cc b/cobalt/worker/worker.cc
index 5e9d507..13fa24b 100644
--- a/cobalt/worker/worker.cc
+++ b/cobalt/worker/worker.cc
@@ -73,7 +73,6 @@
worker_global_scope_ = nullptr;
message_port_ = nullptr;
content_.reset();
- error_.reset();
}
Worker::~Worker() { Abort(); }
@@ -192,6 +191,23 @@
base::Bind(&Worker::OnLoadingComplete, base::Unretained(this)));
}
+void Worker::SendErrorEventToOutside(const std::string& message) {
+ LOG(WARNING) << "Worker loading failed : " << message;
+ options_.outside_context->message_loop()->task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ [](base::WeakPtr<web::EventTarget> event_target,
+ const std::string& message, const std::string& filename) {
+ web::ErrorEventInit error;
+ error.set_message(message);
+ error.set_filename(filename);
+ event_target->DispatchEvent(new web::ErrorEvent(
+ event_target->environment_settings(), error));
+ },
+ base::AsWeakPtr(options_.outside_event_target), message,
+ web_context_->environment_settings()->creation_url().spec()));
+}
+
void Worker::OnContentProduced(const loader::Origin& last_url_origin,
std::unique_ptr<std::string> content) {
// Algorithm for 'run a worker'
@@ -209,35 +225,13 @@
void Worker::OnLoadingComplete(const base::Optional<std::string>& error) {
// Algorithm for 'run a worker'
// https://html.spec.whatwg.org/commit-snapshots/465a6b672c703054de278b0f8133eb3ad33d93f4/#run-a-worker
- error_ = error;
// If the algorithm asynchronously completes with null or with a script
// whose error to rethrow is non-null, then:
- if (error_ || !content_) {
+ if (error || !content_) {
// 1. Queue a global task on the DOM manipulation task source given
// worker's relevant global object to fire an event named error at
// worker.
- options_.outside_context->message_loop()->task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(
- [](web::WindowOrWorkerGlobalScope* global_scope,
- const base::Optional<std::string>& message,
- const base::SourceLocation& location) {
- web::ErrorEventInit error;
- error.set_message(message.value_or("No content for worker."));
- error.set_filename(location.file_path);
- error.set_lineno(location.line_number);
- error.set_colno(location.column_number);
- global_scope->DispatchEvent(new web::ErrorEvent(
- global_scope->environment_settings(), error));
- },
- base::Unretained(
- options_.outside_context->GetWindowOrWorkerGlobalScope()),
- error, options_.construction_location));
- if (error_) {
- LOG(WARNING) << "Script loading failed : " << *error;
- } else {
- LOG(WARNING) << "Script loading failed : no content received.";
- }
+ SendErrorEventToOutside(error.value_or("No content for worker."));
// 2. Run the environment discarding steps for inside settings.
// 3. Return.
return;
@@ -285,19 +279,7 @@
std::string retval = web_context_->script_runner()->Execute(
content, script_location, mute_errors, &succeeded);
if (!succeeded) {
- options_.outside_context->message_loop()->task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(
- [](web::Context* context, const std::string& message,
- const std::string& filename) {
- web::ErrorEventInit error;
- error.set_message(message);
- error.set_filename(filename);
- context->GetWindowOrWorkerGlobalScope()->DispatchEvent(
- new web::ErrorEvent(context->environment_settings(), error));
- },
- options_.outside_context, retval,
- web_context_->environment_settings()->creation_url().spec()));
+ SendErrorEventToOutside(retval);
}
// 24. Enable outside port's port message queue.
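The new Worker::SendErrorEventToOutside() helper above folds two nearly identical PostTask lambdas into one path: hop to the outside context's task runner and dispatch a web::ErrorEvent through a weak pointer to the outside event target. A simplified sketch of that dispatch shape, with a hypothetical Target class in place of web::EventTarget:

    #include <string>

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/ref_counted.h"
    #include "base/memory/weak_ptr.h"
    #include "base/task_runner.h"

    // Hypothetical stand-in for web::EventTarget; only the error hook matters.
    class Target : public base::SupportsWeakPtr<Target> {
     public:
      void OnError(const std::string& message, const std::string& filename) {}
    };

    void PostErrorToOutside(scoped_refptr<base::TaskRunner> outside_runner,
                            Target* target, const std::string& message,
                            const std::string& filename) {
      outside_runner->PostTask(
          FROM_HERE,
          base::BindOnce(
              [](base::WeakPtr<Target> target, const std::string& message,
                 const std::string& filename) {
                // Skipped if the outside object has already been destroyed.
                if (target) target->OnError(message, filename);
              },
              base::AsWeakPtr(target), message, filename));
    }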
diff --git a/cobalt/worker/worker.h b/cobalt/worker/worker.h
index 9d8d779..d37d462 100644
--- a/cobalt/worker/worker.h
+++ b/cobalt/worker/worker.h
@@ -69,6 +69,7 @@
// https://html.spec.whatwg.org/commit-snapshots/465a6b672c703054de278b0f8133eb3ad33d93f4/#dom-worker
GURL url;
web::Context* outside_context = nullptr;
+  web::EventTarget* outside_event_target = nullptr;
web::MessagePort* outside_port = nullptr;
WorkerOptions options;
};
@@ -97,6 +98,9 @@
// thread.
void Initialize(web::Context* context);
+ // Send an error event to the outside object.
+ void SendErrorEventToOutside(const std::string& message);
+
void OnContentProduced(const loader::Origin& last_url_origin,
std::unique_ptr<std::string> content);
void OnLoadingComplete(const base::Optional<std::string>& error);
@@ -135,9 +139,6 @@
// Content of the script. Released after Execute is called.
std::unique_ptr<std::string> content_;
- // If the script failed, contains the error message.
- base::Optional<std::string> error_;
-
// The execution ready flag.
// https://html.spec.whatwg.org/commit-snapshots/465a6b672c703054de278b0f8133eb3ad33d93f4/#concept-environment-execution-ready-flag
base::WaitableEvent execution_ready_ = {
diff --git a/cobalt/worker/worker_global_scope.cc b/cobalt/worker/worker_global_scope.cc
index 33c7b86..0f2372b 100644
--- a/cobalt/worker/worker_global_scope.cc
+++ b/cobalt/worker/worker_global_scope.cc
@@ -154,7 +154,7 @@
if (!SbAtomicNoBarrier_Increment(&number_of_loads_, -1)) {
// Clear the loader factory after this callback
// completes.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(
[](base::WaitableEvent* load_finished_) {
load_finished_->Signal();
diff --git a/cobalt/xhr/BUILD.gn b/cobalt/xhr/BUILD.gn
index 17ad212..c2bd6fc 100644
--- a/cobalt/xhr/BUILD.gn
+++ b/cobalt/xhr/BUILD.gn
@@ -41,6 +41,7 @@
"//cobalt/base",
"//cobalt/dom_parser",
"//cobalt/loader",
+ "//cobalt/network",
"//cobalt/script",
"//cobalt/web",
"//nb",
diff --git a/cobalt/xhr/xml_http_request.cc b/cobalt/xhr/xml_http_request.cc
index edd864a..8de549a 100644
--- a/cobalt/xhr/xml_http_request.cc
+++ b/cobalt/xhr/xml_http_request.cc
@@ -23,6 +23,7 @@
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "cobalt/base/polymorphic_downcast.h"
#include "cobalt/base/source_location.h"
@@ -36,6 +37,7 @@
#include "cobalt/loader/fetch_interceptor_coordinator.h"
#include "cobalt/loader/fetcher_factory.h"
#include "cobalt/loader/url_fetcher_string_writer.h"
+#include "cobalt/network/network_module.h"
#include "cobalt/script/global_environment.h"
#include "cobalt/script/javascript_engine.h"
#include "cobalt/web/context.h"
@@ -344,6 +346,12 @@
XMLHttpRequestImpl::XMLHttpRequestImpl(XMLHttpRequest* xhr)
: error_(false),
is_cross_origin_(false),
+ cors_policy_(xhr->environment_settings()
+ ->context()
+ ->fetcher_factory()
+ ->network_module()
+ ->network_delegate()
+ ->cors_policy()),
is_data_url_(false),
is_redirect_(false),
method_(net::URLFetcher::GET),
@@ -366,7 +374,7 @@
sent_(false),
settings_(xhr->environment_settings()),
stop_timeout_(false),
- task_runner_(base::MessageLoop::current()->task_runner()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
timeout_ms_(0),
upload_complete_(false) {
DCHECK(environment_settings());
@@ -534,7 +542,7 @@
if (will_destroy_current_message_loop_.load()) {
return;
}
- if (task_runner_ != base::MessageLoop::current()->task_runner()) {
+ if (task_runner_ != base::ThreadTaskRunnerHandle::Get()) {
task_runner_->PostTask(FROM_HERE,
base::BindOnce(&XMLHttpRequestImpl::SendIntercepted,
AsWeakPtr(), std::move(response)));
@@ -602,7 +610,7 @@
if (will_destroy_current_message_loop_.load()) {
return;
}
- if (task_runner_ != base::MessageLoop::current()->task_runner()) {
+ if (task_runner_ != base::ThreadTaskRunnerHandle::Get()) {
task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&XMLHttpRequestImpl::SendFallback,
@@ -918,7 +926,7 @@
if (is_cross_origin_) {
if (!loader::CORSPreflight::CORSCheck(*http_response_headers_,
origin_.SerializedOrigin(),
- with_credentials_)) {
+ with_credentials_, cors_policy_)) {
HandleRequestError(XMLHttpRequest::kNetworkError);
return;
}
@@ -1177,7 +1185,7 @@
// CORS check for the received response
if (is_cross_origin_) {
if (!loader::CORSPreflight::CORSCheck(headers, origin_.SerializedOrigin(),
- with_credentials_)) {
+ with_credentials_, cors_policy_)) {
HandleRequestError(XMLHttpRequest::kNetworkError);
return;
}
@@ -1416,6 +1424,7 @@
// Don't retry, let the caller deal with it.
url_fetcher_->SetAutomaticallyRetryOn5xx(false);
url_fetcher_->SetExtraRequestHeaders(request_headers_.ToString());
+ network_module->AddClientHintHeaders(*url_fetcher_);
// We want to do cors check and preflight during redirects
url_fetcher_->SetStopOnRedirect(true);
@@ -1452,6 +1461,7 @@
->GetWindowOrWorkerGlobalScope()
->get_preflight_cache()));
corspreflight_->set_headers(request_headers_);
+ corspreflight_->set_cors_policy(cors_policy_);
// For cross-origin requests, don't send or save auth data / cookies unless
// withCredentials was set.
// To make a cross-origin request, add origin, referrer source, credentials,
@@ -1526,7 +1536,7 @@
void XMLHttpRequestImpl::StartURLFetcher(const SbTime max_artificial_delay,
const int url_fetcher_generation) {
if (max_artificial_delay > 0) {
- base::MessageLoop::current()->task_runner()->PostDelayedTask(
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::Bind(&XMLHttpRequestImpl::StartURLFetcher, base::Unretained(this),
0, url_fetcher_generation_),
diff --git a/cobalt/xhr/xml_http_request.h b/cobalt/xhr/xml_http_request.h
index 7b83f57..5a5275e 100644
--- a/cobalt/xhr/xml_http_request.h
+++ b/cobalt/xhr/xml_http_request.h
@@ -337,6 +337,7 @@
// All members requiring initialization are grouped below.
bool error_;
bool is_cross_origin_;
+ network::CORSPolicy cors_policy_;
bool is_data_url_;
bool is_redirect_;
net::URLFetcher::RequestType method_;
@@ -444,7 +445,7 @@
bool sent_;
web::EnvironmentSettings* const settings_;
bool stop_timeout_;
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ scoped_refptr<base::SequencedTaskRunner> task_runner_;
uint32 timeout_ms_;
bool upload_complete_;
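The XHR changes read a network::CORSPolicy once in the constructor (from the fetcher factory's network module) and pass it to both CORSPreflight::CORSCheck() call sites and to the preflight object. A very small sketch of that capture-once, pass-everywhere pattern, with hypothetical Policy and CheckAllowed names:

    #include <string>

    // Hypothetical policy type; the real one in the diff is network::CORSPolicy.
    enum class Policy { kDefault, kPermissive };

    bool CheckAllowed(const std::string& origin, bool with_credentials,
                      Policy policy) {
      return policy == Policy::kPermissive || !with_credentials;
    }

    class Request {
     public:
      explicit Request(Policy policy) : policy_(policy) {}  // Captured once.

      bool OnResponse(const std::string& origin, bool with_credentials) const {
        // Every check sees the policy the request was constructed with.
        return CheckAllowed(origin, with_credentials, policy_);
      }

     private:
      const Policy policy_;
    };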
diff --git a/components/crash/core/common/BUILD.gn b/components/crash/core/common/BUILD.gn
new file mode 100644
index 0000000..e5d44d3
--- /dev/null
+++ b/components/crash/core/common/BUILD.gn
@@ -0,0 +1,154 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/buildflag_header.gni")
+
+declare_args() {
+ # If set to true, this will stub out and disable the entire crash key system.
+ use_crash_key_stubs = is_fuchsia
+}
+
+group("common") {
+ public_deps = [
+ ":crash_key",
+ ":crash_key_utils",
+ ]
+
+ if (is_mac || is_ios) {
+ public_deps += [ ":zombies" ]
+ }
+}
+
+use_crashpad_annotation = (is_mac || is_win) && !use_crash_key_stubs
+
+buildflag_header("crash_buildflags") {
+ header = "crash_buildflags.h"
+ flags = [
+ "USE_CRASHPAD_ANNOTATION=$use_crashpad_annotation",
+ "USE_CRASH_KEY_STUBS=$use_crash_key_stubs",
+ ]
+}
+
+# Crashpad's annotation system can store data on a per-module basis (i.e.,
+# in different shared libraries in the component build) without issue. The
+# Breakpad implementation uses a static global variable, so ensure there is
+# only one instance of the symbol in the component build by making this
+# target a component.
+if (use_crash_key_stubs || use_crashpad_annotation) {
+ crash_key_target_type = "static_library"
+} else {
+ crash_key_target_type = "component"
+}
+target(crash_key_target_type, "crash_key") {
+ sources = [
+ "crash_export.h",
+ "crash_key.cc",
+ "crash_key.h",
+ "crash_key_base_support.cc",
+ "crash_key_base_support.h",
+ ]
+
+ defines = []
+
+ # This target is not always a component, depending on the implementation.
+ # When it is not a component, annotating functions with the standard
+ # CRASH_EXPORT macro causes linking errors on Windows (clients of this target
+ # expect it to be dllimport but it is linked statically). Instead, provide a
+ # wrapper macro CRASH_KEY_EXPORT that only evaluates to CRASH_EXPORT if this
+ # target is really a component.
+ if (crash_key_target_type == "component") {
+ defines += [
+ "CRASH_KEY_EXPORT=CRASH_EXPORT",
+ "CRASH_CORE_COMMON_IMPLEMENTATION",
+ ]
+ }
+
+ deps = [
+ ":crash_buildflags",
+ "//base",
+ ]
+
+ if (use_crash_key_stubs || use_cobalt_customizations) {
+ sources += [ "crash_key_stubs.cc" ]
+ } else if (use_crashpad_annotation) {
+ sources += [ "crash_key_crashpad.cc" ]
+ deps += [ "//third_party/crashpad/crashpad/client" ]
+ } else {
+ include_dirs = [ "//third_party/breakpad/breakpad/src" ]
+
+ if (is_ios) {
+ sources += [ "crash_key_breakpad_ios.mm" ]
+
+ configs += [ "//build/config/compiler:enable_arc" ]
+ } else {
+ sources += [
+ "crash_key_breakpad.cc",
+ "crash_key_internal.h",
+ ]
+ }
+
+ deps += [ "//third_party/breakpad:client" ]
+ }
+}
+
+static_library("crash_key_utils") {
+ visibility = [ ":*" ]
+
+ sources = [
+ "crash_keys.cc",
+ "crash_keys.h",
+ ]
+
+ deps = [
+ ":crash_key",
+ "//base",
+ ]
+}
+
+if (is_mac || is_ios) {
+ component("zombies") {
+ visibility = [ ":common" ]
+
+ sources = [
+ "objc_zombie.h",
+ "objc_zombie.mm",
+ ]
+
+ defines = [ "CRASH_CORE_COMMON_IMPLEMENTATION" ]
+
+ deps = [
+ ":crash_key",
+ "//base",
+ ]
+
+ libs = [ "Foundation.framework" ]
+ }
+}
+
+source_set("unit_tests") {
+ testonly = true
+ sources = [
+ "crash_key_unittest.cc",
+ "crash_keys_unittest.cc",
+ ]
+
+ deps = [
+ ":common",
+ "//base",
+ "//testing/gtest",
+ ]
+
+ if (is_mac || is_ios) {
+ sources += [ "objc_zombie_unittest.mm" ]
+ }
+
+ if (!is_mac && !is_win && !is_fuchsia) {
+ include_dirs = [ "//third_party/breakpad/breakpad/src/" ]
+ sources += [ "crash_key_breakpad_unittest.cc" ]
+ }
+
+ if (is_fuchsia) {
+ sources -= [ "crash_key_unittest.cc" ]
+ }
+}
diff --git a/components/crash/core/common/DEPS b/components/crash/core/common/DEPS
new file mode 100644
index 0000000..ef81573
--- /dev/null
+++ b/components/crash/core/common/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+third_party/breakpad/breakpad/src/client/ios/Breakpad.h",
+ "+third_party/breakpad/breakpad/src/client/ios/BreakpadController.h",
+]
diff --git a/components/crash/core/common/METADATA b/components/crash/core/common/METADATA
new file mode 100644
index 0000000..554e14d
--- /dev/null
+++ b/components/crash/core/common/METADATA
@@ -0,0 +1,21 @@
+name: "common"
+description:
+ "Filtered subtree at components/crash/core/common."
+
+third_party {
+ url {
+ type: LOCAL_SOURCE
+ value: "https://cobalt.googlesource.com/components/crash/core/common_filtered_mirror"
+ }
+ url {
+ type: GIT
+ value: "https://github.com/chromium/chromium"
+ }
+ # Closest commit hash to m70.
+ version: "15915dc41b2c3f0ff00a744d60a6f6cf25f74a8c"
+ last_upgrade_date {
+ year: 2018
+ month: 03
+ day: 30
+ }
+}
diff --git a/components/crash/core/common/crash_export.h b/components/crash/core/common/crash_export.h
new file mode 100644
index 0000000..84fac02
--- /dev/null
+++ b/components/crash/core/common/crash_export.h
@@ -0,0 +1,34 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_CRASH_CORE_COMMON_CRASH_EXPORT_H_
+#define COMPONENTS_CRASH_CORE_COMMON_CRASH_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(CRASH_CORE_COMMON_IMPLEMENTATION)
+#define CRASH_EXPORT __declspec(dllexport)
+#else
+#define CRASH_EXPORT __declspec(dllimport)
+#endif // defined(CRASH_CORE_COMMON_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(CRASH_CORE_COMMON_IMPLEMENTATION)
+#define CRASH_EXPORT __attribute__((visibility("default")))
+#else
+#define CRASH_EXPORT
+#endif // defined(CRASH_CORE_COMMON_IMPLEMENTATION)
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define CRASH_EXPORT
+#endif
+
+// See BUILD.gn :crash_key target for the declaration.
+#if !defined(CRASH_KEY_EXPORT)
+#define CRASH_KEY_EXPORT
+#endif
+
+#endif // COMPONENTS_CRASH_CORE_COMMON_CRASH_EXPORT_H_
diff --git a/components/crash/core/common/crash_key.cc b/components/crash/core/common/crash_key.cc
new file mode 100644
index 0000000..1e2952d
--- /dev/null
+++ b/components/crash/core/common/crash_key.cc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/crash/core/common/crash_key.h"
+
+#include "base/format_macros.h"
+#include "base/strings/stringprintf.h"
+
+namespace crash_reporter {
+namespace internal {
+
+std::string FormatStackTrace(const base::debug::StackTrace& trace,
+ size_t max_length) {
+ size_t count = 0;
+ const void* const* addresses = trace.Addresses(&count);
+
+ std::string value;
+ for (size_t i = 0; i < count; ++i) {
+ std::string address = base::StringPrintf(
+ "0x%" PRIx64, reinterpret_cast<uint64_t>(addresses[i]));
+ if (value.size() + address.size() > max_length)
+ break;
+ value += address + " ";
+ }
+
+ if (!value.empty() && value.back() == ' ') {
+ value.resize(value.size() - 1);
+ }
+
+ return value;
+}
+
+} // namespace internal
+} // namespace crash_reporter
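FormatStackTrace() above emits space-separated hexadecimal addresses and stops before any address that would push the string past max_length, so no address is ever truncated mid-value. A usage sketch; the inputs and the expected result come from the crash_key_unittest.cc added later in this import:

    #include <cstdint>
    #include <string>

    #include "base/debug/stack_trace.h"
    #include "components/crash/core/common/crash_key.h"

    void FormatStackTraceExample() {
      const uintptr_t addresses[] = {0x0badbeef, 0x77778888, 0xabc, 0xddeeff,
                                     0x12345678};
      base::debug::StackTrace trace(
          reinterpret_cast<const void* const*>(addresses), 5);
      // With max_length == 30 the fourth frame no longer fits, so the result
      // is "0xbadbeef 0x77778888 0xabc".
      std::string value = crash_reporter::internal::FormatStackTrace(trace, 30);
    }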
diff --git a/components/crash/core/common/crash_key.h b/components/crash/core/common/crash_key.h
new file mode 100644
index 0000000..c96108a
--- /dev/null
+++ b/components/crash/core/common/crash_key.h
@@ -0,0 +1,229 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_H_
+#define COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/debug/stack_trace.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+#include "components/crash/core/common/crash_buildflags.h"
+#include "components/crash/core/common/crash_export.h"
+
+// The crash key interface exposed by this file is the same as the Crashpad
+// Annotation interface. Because not all platforms use Crashpad yet, a
+// source-compatible interface is provided on top of the older Breakpad
+// storage mechanism.
+#if BUILDFLAG(USE_CRASHPAD_ANNOTATION)
+#include "third_party/crashpad/crashpad/client/annotation.h" // nogncheck
+#endif
+
+namespace crash_reporter {
+
+class CrashKeyBreakpadTest;
+
+// A CrashKeyString stores a name-value pair that will be recorded within a
+// crash report.
+//
+// The crash key name must be a constant string expression, and the value
+// should be unique and identifying. The maximum size for the value is
+// specified as the template argument, and values greater than this are
+// truncated. When specifying a value size, space should be left for the
+// `NUL` byte. Crash keys should be declared with static storage duration.
+//
+// Examples:
+// \code
+// // This crash key is only set in one function:
+// void DidNavigate(const GURL& gurl) {
+// static crash_reporter::CrashKeyString<256> url_key("url");
+// url_key.Set(gurl.ToString());
+// }
+//
+// // This crash key can be set/cleared across different functions:
+// namespace {
+// crash_reporter::CrashKeyString<32> g_operation_id("operation-req-id");
+// }
+//
+// void OnStartingOperation(const std::string& request_id) {
+// g_operation_id.Set(request_id);
+// }
+//
+// void OnEndingOperation() {
+// g_operation_id.Clear()
+// }
+// \endcode
+#if BUILDFLAG(USE_CRASHPAD_ANNOTATION)
+
+template <crashpad::Annotation::ValueSizeType MaxLength>
+using CrashKeyString = crashpad::StringAnnotation<MaxLength>;
+
+#else // Crashpad-compatible crash key interface:
+
+namespace internal {
+
+constexpr size_t kCrashKeyStorageNumEntries = 200;
+constexpr size_t kCrashKeyStorageValueSize = 128;
+
+// Base implementation of a CrashKeyString for non-Crashpad clients. A separate
+// base class is used to avoid inlining complex logic into the public template
+// API.
+class CRASH_KEY_EXPORT CrashKeyStringImpl {
+ public:
+ constexpr explicit CrashKeyStringImpl(const char name[],
+ size_t* index_array,
+ size_t index_array_count)
+ : name_(name),
+ index_array_(index_array),
+ index_array_count_(index_array_count) {}
+
+ void Set(base::StringPiece value);
+ void Clear();
+
+ bool is_set() const;
+
+ private:
+ friend class crash_reporter::CrashKeyBreakpadTest;
+
+ // The name of the crash key.
+ const char* const name_;
+
+ // If the crash key is set, this is the index into the storage that can be
+ // used to set/clear the key without requiring a linear scan of the storage
+ // table. This will be |num_entries| if unset.
+ size_t* index_array_;
+ size_t index_array_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(CrashKeyStringImpl);
+};
+
+// This type creates a C array that is initialized with a specific default
+// value, rather than the standard zero-initialized default.
+template <typename T,
+ size_t TotalSize,
+ T DefaultValue,
+ size_t Count,
+ T... Values>
+struct InitializedArrayImpl {
+ using Type = typename InitializedArrayImpl<T,
+ TotalSize,
+ DefaultValue,
+ Count - 1,
+ DefaultValue,
+ Values...>::Type;
+};
+
+template <typename T, size_t TotalSize, T DefaultValue, T... Values>
+struct InitializedArrayImpl<T, TotalSize, DefaultValue, 0, Values...> {
+ using Type = InitializedArrayImpl<T, TotalSize, DefaultValue, 0, Values...>;
+ T data[TotalSize]{Values...};
+};
+
+template <typename T, size_t ArraySize, T DefaultValue>
+using InitializedArray =
+ typename InitializedArrayImpl<T, ArraySize, DefaultValue, ArraySize>::Type;
+
+} // namespace internal
+
+template <uint32_t MaxLength>
+class CrashKeyString : public internal::CrashKeyStringImpl {
+ public:
+ constexpr static size_t chunk_count =
+ (MaxLength / internal::kCrashKeyStorageValueSize) + 1;
+
+ // A constructor tag that can be used to initialize a C array of crash keys.
+ enum class Tag { kArray };
+
+ constexpr explicit CrashKeyString(const char name[])
+ : internal::CrashKeyStringImpl(name, indexes_.data, chunk_count) {}
+
+ constexpr CrashKeyString(const char name[], Tag tag) : CrashKeyString(name) {}
+
+ private:
+ // Indexes into the TransitionalCrashKeyStorage for when a value is set.
+ // See the comment in CrashKeyStringImpl for details.
+ // An unset index in the storage is represented by a sentinel value, which
+ // is the total number of entries. This will initialize the array with
+ // that sentinel value as a compile-time expression.
+ internal::InitializedArray<size_t,
+ chunk_count,
+ internal::kCrashKeyStorageNumEntries>
+ indexes_;
+
+ DISALLOW_COPY_AND_ASSIGN(CrashKeyString);
+};
+
+#endif
+
+// This scoper clears the specified annotation's value when it goes out of
+// scope.
+//
+// Example:
+// void DoSomething(const std::string& data) {
+// static crash_reporter::CrashKeyString<32> crash_key("DoSomething-data");
+// crash_reporter::ScopedCrashKeyString auto_clear(&crash_key, data);
+//
+//   DoSomethingImpl(data);
+// }
+class ScopedCrashKeyString {
+ public:
+#if BUILDFLAG(USE_CRASHPAD_ANNOTATION)
+ using CrashKeyType = crashpad::Annotation;
+#else
+ using CrashKeyType = internal::CrashKeyStringImpl;
+#endif
+
+ template <class T>
+ ScopedCrashKeyString(T* crash_key, base::StringPiece value)
+ : crash_key_(crash_key) {
+ crash_key->Set(value);
+ }
+
+ ~ScopedCrashKeyString() { crash_key_->Clear(); }
+
+ private:
+ CrashKeyType* const crash_key_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedCrashKeyString);
+};
+
+namespace internal {
+// Formats a stack trace into a string whose length will not exceed
+// |max_length|. This function ensures no addresses are truncated when
+// being formatted.
+CRASH_KEY_EXPORT std::string FormatStackTrace(
+ const base::debug::StackTrace& trace,
+ size_t max_length);
+} // namespace internal
+
+// Formats a base::debug::StackTrace as a string of space-separated hexadecimal
+// numbers and stores it in a CrashKeyString.
+// TODO(rsesek): When all clients use Crashpad, traces should become a first-
+// class Annotation type rather than being forced through string conversion.
+template <uint32_t Size>
+void SetCrashKeyStringToStackTrace(CrashKeyString<Size>* key,
+ const base::debug::StackTrace& trace) {
+ std::string trace_string = internal::FormatStackTrace(trace, Size);
+ key->Set(trace_string);
+}
+
+// Initializes the crash key subsystem if it is required.
+CRASH_KEY_EXPORT void InitializeCrashKeys();
+
+#if defined(UNIT_TEST) || defined(CRASH_CORE_COMMON_IMPLEMENTATION)
+// Returns a value for the crash key named |key_name|. For Crashpad-based
+// clients, this returns the first instance found of the name.
+CRASH_KEY_EXPORT std::string GetCrashKeyValue(const std::string& key_name);
+
+// Resets crash key state and, depending on the platform, de-initializes
+// the system.
+CRASH_KEY_EXPORT void ResetCrashKeysForTesting();
+#endif
+
+} // namespace crash_reporter
+
+#endif // COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_H_
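For the non-Crashpad path above, chunk_count determines how many 128-byte storage slots a CrashKeyString<MaxLength> may occupy: MaxLength / kCrashKeyStorageValueSize + 1. A small worked sketch of that arithmetic; the ChunkCount helper is illustrative only, and the expected counts match the Allocation test added later in this import:

    #include <cstddef>

    constexpr size_t kValueSize = 128;  // internal::kCrashKeyStorageValueSize.

    constexpr size_t ChunkCount(size_t max_length) {
      return max_length / kValueSize + 1;
    }

    static_assert(ChunkCount(32) == 1, "short values use a single slot");
    static_assert(ChunkCount(128) == 2, "a boundary length gets an extra slot");
    static_assert(ChunkCount(395) == 4, "larger values span several slots");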
diff --git a/components/crash/core/common/crash_key_base_support.cc b/components/crash/core/common/crash_key_base_support.cc
new file mode 100644
index 0000000..f9f2cd6
--- /dev/null
+++ b/components/crash/core/common/crash_key_base_support.cc
@@ -0,0 +1,76 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/crash/core/common/crash_key_base_support.h"
+
+#include <memory>
+
+#include "base/debug/crash_logging.h"
+#include "components/crash/core/common/crash_key.h"
+
+namespace crash_reporter {
+
+namespace {
+
+// This stores the value for a crash key allocated through the //base API.
+template <uint32_t ValueSize>
+struct BaseCrashKeyString : public base::debug::CrashKeyString {
+ BaseCrashKeyString(const char name[], base::debug::CrashKeySize size)
+ : base::debug::CrashKeyString(name, size), impl(name) {
+ DCHECK_EQ(static_cast<uint32_t>(size), ValueSize);
+ }
+ crash_reporter::CrashKeyString<ValueSize> impl;
+};
+
+#define SIZE_CLASS_OPERATION(size_class, operation_prefix, operation_suffix) \
+ switch (size_class) { \
+ case base::debug::CrashKeySize::Size32: \
+ operation_prefix BaseCrashKeyString<32> operation_suffix; \
+ break; \
+ case base::debug::CrashKeySize::Size64: \
+ operation_prefix BaseCrashKeyString<64> operation_suffix; \
+ break; \
+ case base::debug::CrashKeySize::Size256: \
+ operation_prefix BaseCrashKeyString<256> operation_suffix; \
+ break; \
+ }
+
+class CrashKeyBaseSupport : public base::debug::CrashKeyImplementation {
+ public:
+ CrashKeyBaseSupport() = default;
+
+ ~CrashKeyBaseSupport() override = default;
+
+ base::debug::CrashKeyString* Allocate(
+ const char name[],
+ base::debug::CrashKeySize size) override {
+ SIZE_CLASS_OPERATION(size, return new, (name, size));
+ return nullptr;
+ }
+
+ void Set(base::debug::CrashKeyString* crash_key,
+ base::StringPiece value) override {
+ SIZE_CLASS_OPERATION(crash_key->size,
+ reinterpret_cast<, *>(crash_key)->impl.Set(value));
+ }
+
+ void Clear(base::debug::CrashKeyString* crash_key) override {
+ SIZE_CLASS_OPERATION(crash_key->size,
+ reinterpret_cast<, *>(crash_key)->impl.Clear());
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CrashKeyBaseSupport);
+};
+
+#undef SIZE_CLASS_OPERATION
+
+} // namespace
+
+void InitializeCrashKeyBaseSupport() {
+ base::debug::SetCrashKeyImplementation(
+ std::make_unique<CrashKeyBaseSupport>());
+}
+
+} // namespace crash_reporter
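Once InitializeCrashKeyBaseSupport() has installed the bridge above, code that depends only on //base can set crash keys through base/debug/crash_logging.h and the values land in the same storage as the crash_reporter keys. A short usage sketch, mirroring the BaseSupport test added later in this import:

    #include "base/debug/crash_logging.h"
    #include "components/crash/core/common/crash_key.h"

    void BaseBridgeExample() {
      crash_reporter::InitializeCrashKeys();  // Installs CrashKeyBaseSupport.

      static base::debug::CrashKeyString* key =
          base::debug::AllocateCrashKeyString("base-support",
                                              base::debug::CrashKeySize::Size64);
      base::debug::SetCrashKeyString(key, "this is a test");
      base::debug::ClearCrashKeyString(key);
    }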
diff --git a/components/crash/core/common/crash_key_base_support.h b/components/crash/core/common/crash_key_base_support.h
new file mode 100644
index 0000000..fcb25ab
--- /dev/null
+++ b/components/crash/core/common/crash_key_base_support.h
@@ -0,0 +1,16 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_BASE_SUPPORT_H_
+#define COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_BASE_SUPPORT_H_
+
+namespace crash_reporter {
+
+// This initializes //base to support crash keys via the interface in
+// base/debug/crash_logging.h.
+void InitializeCrashKeyBaseSupport();
+
+} // namespace crash_reporter
+
+#endif // COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_BASE_SUPPORT_H_
diff --git a/components/crash/core/common/crash_key_breakpad.cc b/components/crash/core/common/crash_key_breakpad.cc
new file mode 100644
index 0000000..0351e01
--- /dev/null
+++ b/components/crash/core/common/crash_key_breakpad.cc
@@ -0,0 +1,152 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// NOTE: This file is only compiled when Crashpad is not used as the crash
+// reporter.
+
+#include "components/crash/core/common/crash_key.h"
+
+#include "base/debug/crash_logging.h"
+#include "base/format_macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "components/crash/core/common/crash_key_base_support.h"
+#include "components/crash/core/common/crash_key_internal.h"
+
+#if defined(OS_MACOSX) || defined(OS_IOS) || defined(OS_WIN)
+#error "This file should not be used when Crashpad is available, nor on iOS."
+#endif
+
+namespace crash_reporter {
+namespace internal {
+
+namespace {
+
+// String used to format chunked key names. The __1 through __N syntax is
+// recognized by the crash collector, which will then stitch the numbered
+// parts back into a single string value.
+const char kChunkFormatString[] = "%s__%" PRIuS;
+
+static TransitionalCrashKeyStorage* g_storage = nullptr;
+
+constexpr size_t kUnsetStorageSlotSentinel =
+ TransitionalCrashKeyStorage::num_entries;
+
+} // namespace
+
+TransitionalCrashKeyStorage* GetCrashKeyStorage() {
+ if (!g_storage) {
+ g_storage = new internal::TransitionalCrashKeyStorage();
+ }
+ return g_storage;
+}
+
+void ResetCrashKeyStorageForTesting() {
+ auto* storage = g_storage;
+ g_storage = nullptr;
+ delete storage;
+}
+
+void CrashKeyStringImpl::Set(base::StringPiece value) {
+ const size_t kValueMaxLength = index_array_count_ * kCrashKeyStorageValueSize;
+
+ TransitionalCrashKeyStorage* storage = GetCrashKeyStorage();
+
+ value = value.substr(0, kValueMaxLength);
+
+ // If there is only one slot for the value, then handle it directly.
+ if (index_array_count_ == 1) {
+ std::string value_string = value.as_string();
+ if (is_set()) {
+ storage->SetValueAtIndex(index_array_[0], value_string.c_str());
+ } else {
+ index_array_[0] = storage->SetKeyValue(name_, value_string.c_str());
+ }
+ return;
+ }
+
+ // If the value fits in a single slot, the name of the key should not
+ // end with the __1 suffix of the chunked format.
+ if (value.length() < kCrashKeyStorageValueSize - 1) {
+ if (index_array_[1] != kUnsetStorageSlotSentinel) {
+ // If switching from chunked to non-chunked, clear all the values.
+ Clear();
+ index_array_[0] = storage->SetKeyValue(name_, value.data());
+ } else if (index_array_[0] != kUnsetStorageSlotSentinel) {
+ // The single entry was previously set.
+ storage->SetValueAtIndex(index_array_[0], value.data());
+ } else {
+ // This key was not previously set.
+ index_array_[0] = storage->SetKeyValue(name_, value.data());
+ }
+ return;
+ }
+
+ // If the key was previously set, but only using one slot, then the chunk
+ // name will change (from |name| to |name__1|).
+ if (index_array_[0] != kUnsetStorageSlotSentinel &&
+ index_array_[1] == kUnsetStorageSlotSentinel) {
+ storage->RemoveAtIndex(index_array_[0]);
+ index_array_[0] = kUnsetStorageSlotSentinel;
+ }
+
+ // Otherwise, break the value into chunks labeled name__1 through name__N,
+ // where N is |index_array_count_|.
+ size_t offset = 0;
+ for (size_t i = 0; i < index_array_count_; ++i) {
+ if (offset < value.length()) {
+ // The storage NUL-terminates the value, so ensure that a byte is
+      // not lost when setting individual chunks.
+ base::StringPiece chunk =
+ value.substr(offset, kCrashKeyStorageValueSize - 1);
+ offset += chunk.length();
+
+ if (index_array_[i] == kUnsetStorageSlotSentinel) {
+ std::string chunk_name =
+ base::StringPrintf(kChunkFormatString, name_, i + 1);
+ index_array_[i] =
+ storage->SetKeyValue(chunk_name.c_str(), chunk.data());
+ } else {
+ storage->SetValueAtIndex(index_array_[i], chunk.data());
+ }
+ } else {
+ storage->RemoveAtIndex(index_array_[i]);
+ index_array_[i] = kUnsetStorageSlotSentinel;
+ }
+ }
+}
+
+void CrashKeyStringImpl::Clear() {
+ for (size_t i = 0; i < index_array_count_; ++i) {
+ GetCrashKeyStorage()->RemoveAtIndex(index_array_[i]);
+ index_array_[i] = kUnsetStorageSlotSentinel;
+ }
+}
+
+bool CrashKeyStringImpl::is_set() const {
+ return index_array_[0] != kUnsetStorageSlotSentinel;
+}
+
+} // namespace internal
+
+void InitializeCrashKeys() {
+ internal::GetCrashKeyStorage();
+ InitializeCrashKeyBaseSupport();
+}
+
+std::string GetCrashKeyValue(const std::string& key_name) {
+ const char* value =
+ internal::GetCrashKeyStorage()->GetValueForKey(key_name.c_str());
+ if (value)
+ return value;
+ return std::string();
+}
+
+void ResetCrashKeysForTesting() {
+ internal::ResetCrashKeyStorageForTesting();
+ base::debug::SetCrashKeyImplementation(nullptr);
+}
+
+} // namespace crash_reporter
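The chunking logic above splits an over-long value across name__1 .. name__N entries, keeping 127 usable characters per 128-byte slot because the storage NUL-terminates each entry. A standalone sketch of the same arithmetic; ChunkValue is illustrative and not part of the file:

    #include <cstddef>
    #include <string>
    #include <vector>

    std::vector<std::string> ChunkValue(const std::string& value,
                                        size_t slot_size = 128) {
      std::vector<std::string> chunks;
      size_t offset = 0;
      while (offset < value.size()) {
        // Leave one byte per slot for the NUL terminator added by the storage.
        std::string chunk = value.substr(offset, slot_size - 1);
        offset += chunk.size();
        chunks.push_back(chunk);  // Stored under name__1, name__2, ...
      }
      return chunks;
    }

    // ChunkValue(std::string(384, 'x')) yields pieces of 127, 127, 127 and 3
    // characters, matching the chunky__1..chunky__4 expectations in the
    // crash_key_breakpad_unittest.cc added later in this import.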
diff --git a/components/crash/core/common/crash_key_breakpad_ios.mm b/components/crash/core/common/crash_key_breakpad_ios.mm
new file mode 100644
index 0000000..288453b
--- /dev/null
+++ b/components/crash/core/common/crash_key_breakpad_ios.mm
@@ -0,0 +1,94 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/crash/core/common/crash_key.h"
+
+#include <dispatch/dispatch.h>
+
+#include "base/strings/sys_string_conversions.h"
+#include "components/crash/core/common/crash_key_base_support.h"
+#import "third_party/breakpad/breakpad/src/client/ios/Breakpad.h"
+#import "third_party/breakpad/breakpad/src/client/ios/BreakpadController.h"
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+// The iOS Breakpad implementation internally uses a LongStringDictionary,
+// which performs the same chunking done by crash_key_breakpad.cc. This class
+// implementation therefore just wraps the iOS Breakpad interface.
+
+namespace crash_reporter {
+namespace internal {
+
+namespace {
+
+// Accessing the BreakpadRef is done on an async queue, so serialize the
+// access to the current thread, as the CrashKeyString API is sync. This
+// matches //ios/chrome/browser/crash_report/breakpad_helper.mm.
+void WithBreakpadRefSync(void (^block)(BreakpadRef ref)) {
+ dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
+
+ [[BreakpadController sharedInstance] withBreakpadRef:^(BreakpadRef ref) {
+ block(ref);
+ dispatch_semaphore_signal(semaphore);
+ }];
+ dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
+}
+
+} // namespace
+
+void CrashKeyStringImpl::Set(base::StringPiece value) {
+ NSString* key = base::SysUTF8ToNSString(name_);
+ NSString* value_ns = base::SysUTF8ToNSString(value.as_string());
+
+ WithBreakpadRefSync(^(BreakpadRef ref) {
+ BreakpadAddUploadParameter(ref, key, value_ns);
+ });
+}
+
+void CrashKeyStringImpl::Clear() {
+ NSString* key = base::SysUTF8ToNSString(name_);
+
+ WithBreakpadRefSync(^(BreakpadRef ref) {
+ BreakpadRemoveUploadParameter(ref, key);
+ });
+}
+
+bool CrashKeyStringImpl::is_set() const {
+ __block bool is_set = false;
+ NSString* key = base::SysUTF8ToNSString(
+ std::string(BREAKPAD_SERVER_PARAMETER_PREFIX) + name_);
+
+ WithBreakpadRefSync(^(BreakpadRef ref) {
+ is_set = BreakpadKeyValue(ref, key) != nil;
+ });
+
+ return is_set;
+}
+
+} // namespace internal
+
+void InitializeCrashKeys() {
+ InitializeCrashKeyBaseSupport();
+}
+
+std::string GetCrashKeyValue(const std::string& key_name) {
+ __block NSString* value;
+ NSString* key = base::SysUTF8ToNSString(
+ std::string(BREAKPAD_SERVER_PARAMETER_PREFIX) + key_name);
+
+ internal::WithBreakpadRefSync(^(BreakpadRef ref) {
+ value = BreakpadKeyValue(ref, key);
+ });
+
+ return base::SysNSStringToUTF8(value);
+}
+
+void ResetCrashKeysForTesting() {
+ // There's no way to do this on iOS without tearing down the
+ // BreakpadController.
+}
+
+} // namespace crash_reporter
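WithBreakpadRefSync() above makes the asynchronous withBreakpadRef: queue callback look synchronous by blocking on a dispatch semaphore. A portable C++ analogue of the same idea, not the Breakpad code path, using std::promise and std::future:

    #include <functional>
    #include <future>
    #include <string>
    #include <utility>

    // Runs an async query and blocks the caller until its callback fires,
    // mirroring the semaphore-based WithBreakpadRefSync() helper above.
    std::string QuerySynchronously(
        const std::function<void(std::function<void(std::string)>)>& async_query) {
      std::promise<std::string> result;
      std::future<std::string> future = result.get_future();
      async_query(
          [&result](std::string value) { result.set_value(std::move(value)); });
      return future.get();  // Blocks until set_value() is called.
    }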
diff --git a/components/crash/core/common/crash_key_breakpad_unittest.cc b/components/crash/core/common/crash_key_breakpad_unittest.cc
new file mode 100644
index 0000000..e077766
--- /dev/null
+++ b/components/crash/core/common/crash_key_breakpad_unittest.cc
@@ -0,0 +1,198 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/crash/core/common/crash_key.h"
+
+#include "components/crash/core/common/crash_key_internal.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace crash_reporter {
+
+class CrashKeyBreakpadTest : public testing::Test {
+ public:
+ void SetUp() override {
+ InitializeCrashKeys();
+ ASSERT_TRUE(internal::GetCrashKeyStorage());
+ }
+
+ void TearDown() override { internal::ResetCrashKeyStorageForTesting(); }
+
+ internal::TransitionalCrashKeyStorage* storage() {
+ return internal::GetCrashKeyStorage();
+ }
+
+ size_t* GetIndexArray(internal::CrashKeyStringImpl* key) {
+ return key->index_array_;
+ }
+ size_t GetIndexArrayCount(internal::CrashKeyStringImpl* key) {
+ return key->index_array_count_;
+ }
+};
+
+TEST_F(CrashKeyBreakpadTest, ConstantAssertions) {
+ // Tests in this file generate and validate data based on constants
+ // having specific values. This test asserts those assumptions.
+ EXPECT_EQ(128u, internal::kCrashKeyStorageValueSize);
+}
+
+TEST_F(CrashKeyBreakpadTest, Allocation) {
+ const size_t kSentinel = internal::kCrashKeyStorageNumEntries;
+
+ static CrashKeyString<32> key1("short");
+ ASSERT_EQ(1u, GetIndexArrayCount(&key1));
+ auto* indexes = GetIndexArray(&key1);
+ EXPECT_EQ(kSentinel, indexes[0]);
+
+ // An extra index slot is created for lengths equal to the value size.
+ static CrashKeyString<128> key2("extra");
+ ASSERT_EQ(2u, GetIndexArrayCount(&key2));
+ indexes = GetIndexArray(&key2);
+ EXPECT_EQ(kSentinel, indexes[0]);
+ EXPECT_EQ(kSentinel, indexes[1]);
+
+ static CrashKeyString<395> key3("large");
+ ASSERT_EQ(4u, GetIndexArrayCount(&key3));
+ indexes = GetIndexArray(&key3);
+ EXPECT_EQ(kSentinel, indexes[0]);
+ EXPECT_EQ(kSentinel, indexes[1]);
+ EXPECT_EQ(kSentinel, indexes[2]);
+ EXPECT_EQ(kSentinel, indexes[3]);
+}
+
+TEST_F(CrashKeyBreakpadTest, SetClearSingle) {
+ static CrashKeyString<32> key("test-key");
+
+ EXPECT_FALSE(storage()->GetValueForKey("test-key"));
+ EXPECT_EQ(0u, storage()->GetCount());
+
+ key.Set("value");
+
+ ASSERT_EQ(1u, storage()->GetCount());
+ EXPECT_STREQ("value", storage()->GetValueForKey("test-key"));
+
+ key.Set("value 2");
+
+ ASSERT_EQ(1u, storage()->GetCount());
+ EXPECT_STREQ("value 2", storage()->GetValueForKey("test-key"));
+
+ key.Clear();
+
+ EXPECT_FALSE(storage()->GetValueForKey("test-key"));
+ EXPECT_EQ(0u, storage()->GetCount());
+}
+
+TEST_F(CrashKeyBreakpadTest, SetChunked) {
+ std::string chunk1(128, 'A');
+ std::string chunk2(128, 'B');
+ std::string chunk3(128, 'C');
+
+ static CrashKeyString<400> key("chunky");
+
+ EXPECT_EQ(0u, storage()->GetCount());
+
+ key.Set((chunk1 + chunk2 + chunk3).c_str());
+
+ ASSERT_EQ(4u, storage()->GetCount());
+
+ // Since chunk1 through chunk3 are the same size as a storage slot,
+ // and the storage NUL-terminates the value, ensure no bytes are
+ // lost when chunking.
+ EXPECT_EQ(std::string(127, 'A'), storage()->GetValueForKey("chunky__1"));
+ EXPECT_EQ(std::string("A") + std::string(126, 'B'),
+ storage()->GetValueForKey("chunky__2"));
+ EXPECT_EQ(std::string(2, 'B') + std::string(125, 'C'),
+ storage()->GetValueForKey("chunky__3"));
+ EXPECT_EQ(std::string(3, 'C'), storage()->GetValueForKey("chunky__4"));
+
+ std::string chunk4(240, 'D');
+
+ key.Set(chunk4.c_str());
+
+ ASSERT_EQ(2u, storage()->GetCount());
+
+ EXPECT_EQ(std::string(127, 'D'), storage()->GetValueForKey("chunky__1"));
+ EXPECT_EQ(std::string(240 - 127, 'D'),
+ storage()->GetValueForKey("chunky__2"));
+ EXPECT_FALSE(storage()->GetValueForKey("chunky__3"));
+
+ key.Clear();
+
+ EXPECT_EQ(0u, storage()->GetCount());
+}
+
+TEST_F(CrashKeyBreakpadTest, SetTwoChunked) {
+ static CrashKeyString<600> key1("big");
+ static CrashKeyString<256> key2("small");
+
+ EXPECT_EQ(0u, storage()->GetCount());
+
+ key1.Set(std::string(200, '1').c_str());
+
+ ASSERT_EQ(2u, storage()->GetCount());
+
+ EXPECT_EQ(std::string(127, '1'), storage()->GetValueForKey("big__1"));
+ EXPECT_EQ(std::string(73, '1'), storage()->GetValueForKey("big__2"));
+
+ key2.Set(std::string(256, '2').c_str());
+
+ ASSERT_EQ(5u, storage()->GetCount());
+
+ EXPECT_EQ(std::string(127, '1'), storage()->GetValueForKey("big__1"));
+ EXPECT_EQ(std::string(73, '1'), storage()->GetValueForKey("big__2"));
+ EXPECT_EQ(std::string(127, '2'), storage()->GetValueForKey("small__1"));
+ EXPECT_EQ(std::string(127, '2'), storage()->GetValueForKey("small__2"));
+ EXPECT_EQ(std::string(2, '2'), storage()->GetValueForKey("small__3"));
+
+ key1.Set(std::string(510, '3').c_str());
+
+ ASSERT_EQ(8u, storage()->GetCount());
+
+ EXPECT_EQ(std::string(127, '3'), storage()->GetValueForKey("big__1"));
+ EXPECT_EQ(std::string(127, '3'), storage()->GetValueForKey("big__2"));
+ EXPECT_EQ(std::string(127, '3'), storage()->GetValueForKey("big__3"));
+ EXPECT_EQ(std::string(127, '3'), storage()->GetValueForKey("big__4"));
+ EXPECT_EQ(std::string(2, '3'), storage()->GetValueForKey("big__5"));
+ EXPECT_EQ(std::string(127, '2'), storage()->GetValueForKey("small__1"));
+ EXPECT_EQ(std::string(127, '2'), storage()->GetValueForKey("small__2"));
+ EXPECT_EQ(std::string(2, '2'), storage()->GetValueForKey("small__3"));
+
+ key2.Clear();
+
+ ASSERT_EQ(5u, storage()->GetCount());
+
+ EXPECT_EQ(std::string(127, '3'), storage()->GetValueForKey("big__1"));
+ EXPECT_EQ(std::string(127, '3'), storage()->GetValueForKey("big__2"));
+ EXPECT_EQ(std::string(127, '3'), storage()->GetValueForKey("big__3"));
+ EXPECT_EQ(std::string(127, '3'), storage()->GetValueForKey("big__4"));
+ EXPECT_EQ(std::string(2, '3'), storage()->GetValueForKey("big__5"));
+}
+
+TEST_F(CrashKeyBreakpadTest, ChunkSingleEntry) {
+ static CrashKeyString<200> crash_key("split");
+
+ EXPECT_EQ(0u, storage()->GetCount());
+
+ crash_key.Set("test");
+
+ ASSERT_EQ(1u, storage()->GetCount());
+ EXPECT_STREQ("test", storage()->GetValueForKey("split"));
+
+ crash_key.Set(std::string(127, 'z') + "bloop");
+
+ ASSERT_EQ(2u, storage()->GetCount());
+ EXPECT_EQ(std::string(127, 'z'), storage()->GetValueForKey("split__1"));
+ EXPECT_STREQ("bloop", storage()->GetValueForKey("split__2"));
+
+ crash_key.Set("abcdefg");
+
+ ASSERT_EQ(1u, storage()->GetCount());
+ EXPECT_STREQ("abcdefg", storage()->GetValueForKey("split"));
+
+ crash_key.Set("hijklmnop");
+
+ ASSERT_EQ(1u, storage()->GetCount());
+ EXPECT_STREQ("hijklmnop", storage()->GetValueForKey("split"));
+}
+
+} // namespace crash_reporter
diff --git a/components/crash/core/common/crash_key_crashpad.cc b/components/crash/core/common/crash_key_crashpad.cc
new file mode 100644
index 0000000..929392e
--- /dev/null
+++ b/components/crash/core/common/crash_key_crashpad.cc
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// NOTE: This file is only compiled when Crashpad is used as the crash
+// reporter.
+
+#include "components/crash/core/common/crash_key.h"
+
+#include "base/debug/crash_logging.h"
+#include "components/crash/core/common/crash_key_base_support.h"
+#include "third_party/crashpad/crashpad/client/annotation_list.h"
+#include "third_party/crashpad/crashpad/client/crashpad_info.h"
+
+namespace crash_reporter {
+
+void InitializeCrashKeys() {
+ crashpad::AnnotationList::Register();
+ InitializeCrashKeyBaseSupport();
+}
+
+// Returns a value for the crash key named |key_name|. For Crashpad-based
+// clients, this returns the first instance found of the name.
+std::string GetCrashKeyValue(const std::string& key_name) {
+ auto* annotation_list = crashpad::AnnotationList::Get();
+ if (annotation_list) {
+ for (crashpad::Annotation* annotation : *annotation_list) {
+ if (key_name == annotation->name()) {
+ return std::string(static_cast<const char*>(annotation->value()),
+ annotation->size());
+ }
+ }
+ }
+
+ return std::string();
+}
+
+void ResetCrashKeysForTesting() {
+ // The AnnotationList should not be deleted because the static Annotation
+ // object data still reference the link nodes.
+ auto* annotation_list = crashpad::AnnotationList::Get();
+ if (annotation_list) {
+ for (crashpad::Annotation* annotation : *annotation_list) {
+ annotation->Clear();
+ }
+ }
+
+ base::debug::SetCrashKeyImplementation(nullptr);
+}
+
+} // namespace crash_reporter
diff --git a/components/crash/core/common/crash_key_internal.h b/components/crash/core/common/crash_key_internal.h
new file mode 100644
index 0000000..817b16a
--- /dev/null
+++ b/components/crash/core/common/crash_key_internal.h
@@ -0,0 +1,26 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_INTERNAL_H_
+#define COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_INTERNAL_H_
+
+#include "components/crash/core/common/crash_export.h"
+#include "components/crash/core/common/crash_key.h"
+#include "third_party/breakpad/breakpad/src/common/simple_string_dictionary.h"
+
+namespace crash_reporter {
+namespace internal {
+
+using TransitionalCrashKeyStorage = google_breakpad::
+ NonAllocatingMap<40, kCrashKeyStorageValueSize, kCrashKeyStorageNumEntries>;
+
+// Accesses the underlying storage for crash keys for non-Crashpad clients.
+CRASH_KEY_EXPORT TransitionalCrashKeyStorage* GetCrashKeyStorage();
+
+CRASH_KEY_EXPORT void ResetCrashKeyStorageForTesting();
+
+} // namespace internal
+} // namespace crash_reporter
+
+#endif // COMPONENTS_CRASH_CORE_COMMON_CRASH_KEY_INTERNAL_H_
diff --git a/components/crash/core/common/crash_key_stubs.cc b/components/crash/core/common/crash_key_stubs.cc
new file mode 100644
index 0000000..5663eb8
--- /dev/null
+++ b/components/crash/core/common/crash_key_stubs.cc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is used for OS_FUCHSIA and Starboard builds, since there is no
+// crash reporter for those platforms.
+
+#include "build/build_config.h"
+#include "components/crash/core/common/crash_key.h"
+
+#if !defined(STARBOARD) && !BUILDFLAG(USE_CRASH_KEY_STUBS)
+#error "This file should only be compiled when using stubs."
+#endif
+
+namespace crash_reporter {
+
+namespace internal {
+
+void CrashKeyStringImpl::Set(base::StringPiece value) {}
+
+void CrashKeyStringImpl::Clear() {}
+
+bool CrashKeyStringImpl::is_set() const {
+ return false;
+}
+
+} // namespace internal
+
+void InitializeCrashKeys() {}
+
+std::string GetCrashKeyValue(const std::string& key_name) {
+ return std::string();
+}
+
+void ResetCrashKeysForTesting() {}
+
+} // namespace crash_reporter
diff --git a/components/crash/core/common/crash_key_unittest.cc b/components/crash/core/common/crash_key_unittest.cc
new file mode 100644
index 0000000..466eb25
--- /dev/null
+++ b/components/crash/core/common/crash_key_unittest.cc
@@ -0,0 +1,120 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/crash/core/common/crash_key.h"
+
+#include "base/debug/crash_logging.h"
+#include "base/debug/stack_trace.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace crash_reporter {
+namespace {
+
+class CrashKeyStringTest : public testing::Test {
+ public:
+ void SetUp() override { InitializeCrashKeys(); }
+};
+
+TEST_F(CrashKeyStringTest, ScopedCrashKeyString) {
+ static CrashKeyString<32> key("test-scope");
+
+ EXPECT_FALSE(key.is_set());
+
+ {
+ ScopedCrashKeyString scoper(&key, "value");
+ EXPECT_TRUE(key.is_set());
+ }
+
+ EXPECT_FALSE(key.is_set());
+}
+
+TEST_F(CrashKeyStringTest, FormatStackTrace) {
+ const uintptr_t addresses[] = {
+ 0x0badbeef, 0x77778888, 0xabc, 0x000ddeeff, 0x12345678,
+ };
+ base::debug::StackTrace trace(reinterpret_cast<const void* const*>(addresses),
+ arraysize(addresses));
+
+ std::string too_small = internal::FormatStackTrace(trace, 3);
+ EXPECT_EQ(0u, too_small.size());
+
+ std::string one_value = internal::FormatStackTrace(trace, 16);
+ EXPECT_EQ("0xbadbeef", one_value);
+
+ std::string three_values = internal::FormatStackTrace(trace, 30);
+ EXPECT_EQ("0xbadbeef 0x77778888 0xabc", three_values);
+
+ std::string all_values = internal::FormatStackTrace(trace, 128);
+ EXPECT_EQ("0xbadbeef 0x77778888 0xabc 0xddeeff 0x12345678", all_values);
+}
+
+#if defined(ARCH_CPU_64_BITS)
+TEST_F(CrashKeyStringTest, FormatStackTrace64) {
+ const uintptr_t addresses[] = {
+ 0xbaaaabaaaaba, 0x1000000000000000,
+ };
+ base::debug::StackTrace trace(reinterpret_cast<const void* const*>(addresses),
+ arraysize(addresses));
+
+ std::string too_small = internal::FormatStackTrace(trace, 8);
+ EXPECT_EQ(0u, too_small.size());
+
+ std::string one_value = internal::FormatStackTrace(trace, 20);
+ EXPECT_EQ("0xbaaaabaaaaba", one_value);
+
+ std::string all_values = internal::FormatStackTrace(trace, 35);
+ EXPECT_EQ("0xbaaaabaaaaba 0x1000000000000000", all_values);
+}
+#endif
+
+// In certain build configurations, StackTrace will produce an
+// empty result, which will cause the test to fail.
+#if !defined(OFFICIAL_BUILD) && !defined(NO_UNWIND_TABLES)
+TEST_F(CrashKeyStringTest, SetStackTrace) {
+ static CrashKeyString<1024> key("test-trace");
+
+ EXPECT_FALSE(key.is_set());
+
+ SetCrashKeyStringToStackTrace(&key, base::debug::StackTrace());
+
+ EXPECT_TRUE(key.is_set());
+}
+#endif
+
+TEST_F(CrashKeyStringTest, BaseSupport) {
+ static base::debug::CrashKeyString* crash_key =
+ base::debug::AllocateCrashKeyString("base-support",
+ base::debug::CrashKeySize::Size64);
+
+ EXPECT_TRUE(crash_key);
+
+ base::debug::SetCrashKeyString(crash_key, "this is a test");
+
+ base::debug::ClearCrashKeyString(crash_key);
+
+ base::debug::SetCrashKeyString(crash_key, std::string(128, 'b'));
+ base::debug::SetCrashKeyString(crash_key, std::string(64, 'a'));
+}
+
+TEST_F(CrashKeyStringTest, CArrayInitializer) {
+ static CrashKeyString<8> keys[] = {
+ {"test-1", CrashKeyString<8>::Tag::kArray},
+ {"test-2", CrashKeyString<8>::Tag::kArray},
+ {"test-3", CrashKeyString<8>::Tag::kArray},
+ };
+
+ EXPECT_FALSE(keys[0].is_set());
+ EXPECT_FALSE(keys[1].is_set());
+ EXPECT_FALSE(keys[2].is_set());
+
+ keys[1].Set("test");
+
+ EXPECT_FALSE(keys[0].is_set());
+ EXPECT_TRUE(keys[1].is_set());
+ EXPECT_FALSE(keys[2].is_set());
+}
+
+} // namespace
+} // namespace crash_reporter
diff --git a/components/crash/core/common/crash_keys.cc b/components/crash/core/common/crash_keys.cc
new file mode 100644
index 0000000..bbb0af7
--- /dev/null
+++ b/components/crash/core/common/crash_keys.cc
@@ -0,0 +1,147 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/crash/core/common/crash_keys.h"
+
+#include <deque>
+#include <vector>
+
+#include "base/command_line.h"
+#include "base/format_macros.h"
+#include "base/no_destructor.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "components/crash/core/common/crash_key.h"
+
+namespace crash_keys {
+
+namespace {
+
+#if defined(OS_MACOSX) || defined(OS_WIN)
+// When using Crashpad, the crash reporting client ID is the responsibility of
+// Crashpad. It is not set directly by Chrome. To make the metrics client ID
+// available on the server, it's stored in a distinct key.
+const char kMetricsClientId[] = "metrics_client_id";
+#else
+// When using Breakpad instead of Crashpad, the crash reporting client ID is the
+// same as the metrics client ID.
+const char kMetricsClientId[] = "guid";
+#endif
+
+crash_reporter::CrashKeyString<40> client_id_key(kMetricsClientId);
+
+} // namespace
+
+void SetMetricsClientIdFromGUID(const std::string& metrics_client_guid) {
+ std::string stripped_guid(metrics_client_guid);
+ // Remove all instances of '-' from the GUID, so BCD-WXY becomes BCDWXY.
+ base::ReplaceSubstringsAfterOffset(
+ &stripped_guid, 0, "-", base::StringPiece());
+ if (stripped_guid.empty())
+ return;
+
+ client_id_key.Set(stripped_guid);
+}
+
+void ClearMetricsClientId() {
+#if defined(OS_MACOSX) || defined(OS_WIN)
+ // Crashpad always monitors for crashes, but doesn't upload them when
+ // crash reporting is disabled. The preference to upload crash reports is
+ // linked to the preference for metrics reporting. When metrics reporting is
+ // disabled, don't put the metrics client ID into crash dumps. This way, crash
+ // reports that are saved but not uploaded will not have a metrics client ID
+ // from the time that metrics reporting was disabled even if they are uploaded
+ // by user action at a later date.
+ //
+ // Breakpad cannot be enabled or disabled without an application restart, and
+ // it needs to use the metrics client ID as its stable crash client ID, so
+ // leave its client ID intact even when metrics reporting is disabled while
+ // the application is running.
+ client_id_key.Clear();
+#endif
+}
+
+using SwitchesCrashKeys = std::deque<crash_reporter::CrashKeyString<64>>;
+SwitchesCrashKeys& GetSwitchesCrashKeys() {
+ static base::NoDestructor<SwitchesCrashKeys> switches_keys;
+ return *switches_keys;
+}
+
+static crash_reporter::CrashKeyString<4> num_switches_key("num-switches");
+
+void SetSwitchesFromCommandLine(const base::CommandLine& command_line,
+ SwitchFilterFunction skip_filter) {
+ const base::CommandLine::StringVector& argv = command_line.argv();
+
+ // Record the total number of switches, since some switches in
+ // command_line may be filtered out of the crash keys below.
+ num_switches_key.Set(base::NumberToString(argv.size() - 1));
+
+ size_t key_i = 0;
+
+ // Go through the argv, skipping the exec path. Stop if there are too many
+ // switches to hold in crash keys.
+ for (size_t i = 1; i < argv.size(); ++i) {
+#if defined(OS_WIN)
+ std::string switch_str = base::WideToUTF8(argv[i]);
+#else
+ std::string switch_str = argv[i];
+#endif
+
+ // Skip uninteresting switches.
+ if (skip_filter && (*skip_filter)(switch_str))
+ continue;
+
+ if (key_i >= GetSwitchesCrashKeys().size()) {
+ static base::NoDestructor<std::deque<std::string>> crash_keys_names;
+ crash_keys_names->emplace_back(
+ base::StringPrintf("switch-%" PRIuS, key_i + 1));
+ GetSwitchesCrashKeys().emplace_back(crash_keys_names->back().c_str());
+ }
+ GetSwitchesCrashKeys()[key_i++].Set(switch_str);
+ }
+
+ // Clear any remaining switches.
+ for (; key_i < GetSwitchesCrashKeys().size(); ++key_i)
+ GetSwitchesCrashKeys()[key_i].Clear();
+}
+
+void ResetCommandLineForTesting() {
+ num_switches_key.Clear();
+ for (auto& key : GetSwitchesCrashKeys()) {
+ key.Clear();
+ }
+}
+
+using PrinterInfoKey = crash_reporter::CrashKeyString<64>;
+static PrinterInfoKey printer_info_keys[] = {
+ {"prn-info-1", PrinterInfoKey::Tag::kArray},
+ {"prn-info-2", PrinterInfoKey::Tag::kArray},
+ {"prn-info-3", PrinterInfoKey::Tag::kArray},
+ {"prn-info-4", PrinterInfoKey::Tag::kArray},
+};
+
+ScopedPrinterInfo::ScopedPrinterInfo(base::StringPiece data) {
+ std::vector<base::StringPiece> info = base::SplitStringPiece(
+ data, ";", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ for (size_t i = 0; i < arraysize(printer_info_keys); ++i) {
+ if (i < info.size())
+ printer_info_keys[i].Set(info[i]);
+ else
+ printer_info_keys[i].Clear();
+ }
+}
+
+ScopedPrinterInfo::~ScopedPrinterInfo() {
+ for (auto& crash_key : printer_info_keys) {
+ crash_key.Clear();
+ }
+}
+
+} // namespace crash_keys
diff --git a/components/crash/core/common/crash_keys.h b/components/crash/core/common/crash_keys.h
new file mode 100644
index 0000000..20eb442
--- /dev/null
+++ b/components/crash/core/common/crash_keys.h
@@ -0,0 +1,50 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_CRASH_CORE_COMMON_CRASH_KEYS_H_
+#define COMPONENTS_CRASH_CORE_COMMON_CRASH_KEYS_H_
+
+#include <string>
+
+#include "base/strings/string_piece.h"
+
+namespace base {
+class CommandLine;
+} // namespace base
+
+namespace crash_keys {
+
+// Sets the ID (which may either be a full GUID or a GUID that was already
+// stripped of its dashes -- in either case this method will strip remaining
+// dashes before setting the crash key).
+void SetMetricsClientIdFromGUID(const std::string& metrics_client_guid);
+void ClearMetricsClientId();
+
+// A function returning true if |flag| is a switch that should be filtered out
+// of crash keys.
+using SwitchFilterFunction = bool (*)(const std::string& flag);
+
+// Sets the "num-switches" key and a set of "switch-N" keys based on the given
+// |command_line|. If |skip_filter| is not null, ignore any switch
+// for which it returns true.
+void SetSwitchesFromCommandLine(const base::CommandLine& command_line,
+ SwitchFilterFunction skip_filter);
+
+// Clears all the CommandLine-related crash keys.
+void ResetCommandLineForTesting();
+
+// Sets the printer info. Data should be separated by ';' into up to 4 substrings.
+// Each substring will be truncated if necessary.
+class ScopedPrinterInfo {
+ public:
+ explicit ScopedPrinterInfo(base::StringPiece data);
+ ~ScopedPrinterInfo();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedPrinterInfo);
+};
+
+} // namespace crash_keys
+
+#endif // COMPONENTS_CRASH_CORE_COMMON_CRASH_KEYS_H_
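For orientation, a minimal usage sketch of the API declared above. The IsUninterestingSwitch filter and the RecordStartupCrashKeys call site are hypothetical names introduced only for illustration; the crash_keys:: functions are the ones from this header.

    // Illustrative sketch only -- not part of this patch.
    #include <string>

    #include "base/command_line.h"
    #include "components/crash/core/common/crash_keys.h"

    namespace {

    // Hypothetical filter: drop switches with no diagnostic value.
    bool IsUninterestingSwitch(const std::string& flag) {
      return flag == "--disable-logging";
    }

    }  // namespace

    void RecordStartupCrashKeys(const base::CommandLine& command_line,
                                const std::string& metrics_client_guid) {
      // Stores the GUID (dashes stripped) under the platform-appropriate key.
      crash_keys::SetMetricsClientIdFromGUID(metrics_client_guid);
      // Populates "num-switches" and the "switch-N" keys, skipping filtered ones.
      crash_keys::SetSwitchesFromCommandLine(command_line, &IsUninterestingSwitch);
    }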
diff --git a/components/crash/core/common/crash_keys_unittest.cc b/components/crash/core/common/crash_keys_unittest.cc
new file mode 100644
index 0000000..603cc36
--- /dev/null
+++ b/components/crash/core/common/crash_keys_unittest.cc
@@ -0,0 +1,113 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/crash/core/common/crash_keys.h"
+
+#include <string>
+
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/crash_logging.h"
+#include "base/format_macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "components/crash/core/common/crash_key.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using crash_reporter::GetCrashKeyValue;
+
+// The number of switch-N keys declared in SetSwitchesFromCommandLine().
+constexpr int kSwitchesMaxCount = 15;
+
+class CrashKeysTest : public testing::Test {
+ public:
+ void SetUp() override {
+ ResetData();
+ crash_reporter::InitializeCrashKeys();
+ }
+
+ void TearDown() override {
+ ResetData();
+ }
+
+ private:
+ void ResetData() {
+ crash_keys::ResetCommandLineForTesting();
+ crash_reporter::ResetCrashKeysForTesting();
+ }
+};
+
+TEST_F(CrashKeysTest, Switches) {
+ // Set three switches.
+ {
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
+ for (size_t i = 1; i <= 3; ++i)
+ command_line.AppendSwitch(base::StringPrintf("--flag-%" PRIuS, i));
+ crash_keys::SetSwitchesFromCommandLine(command_line, nullptr);
+ EXPECT_EQ("--flag-1", GetCrashKeyValue("switch-1"));
+ EXPECT_EQ("--flag-2", GetCrashKeyValue("switch-2"));
+ EXPECT_EQ("--flag-3", GetCrashKeyValue("switch-3"));
+ EXPECT_TRUE(GetCrashKeyValue("switch-4").empty());
+ }
+
+ // Set more than 15 switches.
+ {
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
+ const size_t kMax = kSwitchesMaxCount + 2;
+ EXPECT_GT(kMax, static_cast<size_t>(15));
+ for (size_t i = 1; i <= kMax; ++i)
+ command_line.AppendSwitch(base::StringPrintf("--many-%" PRIuS, i));
+ crash_keys::SetSwitchesFromCommandLine(command_line, nullptr);
+ EXPECT_EQ("--many-1", GetCrashKeyValue("switch-1"));
+ EXPECT_EQ("--many-9", GetCrashKeyValue("switch-9"));
+ EXPECT_EQ("--many-15", GetCrashKeyValue("switch-15"));
+ EXPECT_FALSE(GetCrashKeyValue("switch-16").empty());
+ EXPECT_FALSE(GetCrashKeyValue("switch-17").empty());
+ }
+
+ // Set fewer to ensure that old ones are erased.
+ {
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
+ for (int i = 1; i <= 5; ++i)
+ command_line.AppendSwitch(base::StringPrintf("--fewer-%d", i));
+ crash_keys::SetSwitchesFromCommandLine(command_line, nullptr);
+ EXPECT_EQ("--fewer-1", GetCrashKeyValue("switch-1"));
+ EXPECT_EQ("--fewer-2", GetCrashKeyValue("switch-2"));
+ EXPECT_EQ("--fewer-3", GetCrashKeyValue("switch-3"));
+ EXPECT_EQ("--fewer-4", GetCrashKeyValue("switch-4"));
+ EXPECT_EQ("--fewer-5", GetCrashKeyValue("switch-5"));
+ for (int i = 6; i < 20; ++i)
+ EXPECT_TRUE(GetCrashKeyValue(base::StringPrintf("switch-%d", i)).empty());
+ }
+}
+
+namespace {
+
+bool IsBoringFlag(const std::string& flag) {
+ return flag.compare("--boring") == 0;
+}
+
+} // namespace
+
+TEST_F(CrashKeysTest, FilterFlags) {
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
+ command_line.AppendSwitch("--not-boring-1");
+ command_line.AppendSwitch("--boring");
+
+ // Include the max number of non-boring switches, to make sure that only the
+ // switches actually included in the crash keys are counted.
+ for (size_t i = 2; i <= kSwitchesMaxCount; ++i)
+ command_line.AppendSwitch(base::StringPrintf("--not-boring-%" PRIuS, i));
+
+ crash_keys::SetSwitchesFromCommandLine(command_line, &IsBoringFlag);
+
+ // If the boring keys are filtered out, every single key should now be
+ // not-boring.
+ for (int i = 1; i <= kSwitchesMaxCount; ++i) {
+ std::string switch_name = base::StringPrintf("switch-%d", i);
+ std::string switch_value = base::StringPrintf("--not-boring-%d", i);
+ EXPECT_EQ(switch_value, GetCrashKeyValue(switch_name))
+ << "switch_name is " << switch_name;
+ }
+}
diff --git a/components/crash/core/common/objc_zombie.h b/components/crash/core/common/objc_zombie.h
new file mode 100644
index 0000000..f1aa8e3
--- /dev/null
+++ b/components/crash/core/common/objc_zombie.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_CRASH_CORE_COMMON_OBJC_ZOMBIE_H_
+#define COMPONENTS_CRASH_CORE_COMMON_OBJC_ZOMBIE_H_
+
+#include <stddef.h>
+
+#include "build/build_config.h"
+#include "components/crash/core/common/crash_export.h"
+
+// You should think twice every single time you use anything from this
+// namespace.
+namespace ObjcEvilDoers {
+
+// Enable zombie object debugging. This implements a variant of Apple's
+// NSZombieEnabled which can help expose use-after-free errors where messages
+// are sent to freed Objective-C objects in production builds.
+//
+// Returns NO if it fails to enable.
+//
+// When |zombieAllObjects| is YES, all objects inheriting from
+// NSObject become zombies on -dealloc. If NO, -shouldBecomeCrZombie
+// is queried to determine whether to make the object a zombie.
+//
+// |zombieCount| controls how many zombies to store before freeing the
+// oldest. Set to 0 to free objects immediately after making them
+// zombies.
+bool CRASH_EXPORT ZombieEnable(bool zombieAllObjects, size_t zombieCount);
+
+// Disable zombies.
+void CRASH_EXPORT ZombieDisable();
+
+} // namespace ObjcEvilDoers
+
+#if defined(OS_MACOSX)
+#if defined(__OBJC__)
+
+#import <Foundation/Foundation.h>
+
+@interface NSObject (CrZombie)
+- (BOOL)shouldBecomeCrZombie;
+@end
+
+#endif // __OBJC__
+#endif // OS_MACOSX
+
+#endif // COMPONENTS_CRASH_CORE_COMMON_OBJC_ZOMBIE_H_
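A short usage sketch for the API above; the call sites are hypothetical, and only ObjcEvilDoers::ZombieEnable/ZombieDisable come from this header. The unit test added later in this patch exercises the same calls.

    // Illustrative sketch only -- not part of this patch.
    #include "components/crash/core/common/objc_zombie.h"

    void EnableZombiesForDebugging() {
      // Zombie every freed object and keep the most recent 100 before freeing.
      // Returns false if the runtime hooks could not be installed.
      if (!ObjcEvilDoers::ZombieEnable(true, 100)) {
        // Zombies stay disabled; nothing else to clean up.
      }
    }

    void DisableZombies() {
      ObjcEvilDoers::ZombieDisable();
    }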
diff --git a/components/crash/core/common/objc_zombie.mm b/components/crash/core/common/objc_zombie.mm
new file mode 100644
index 0000000..9af0cee
--- /dev/null
+++ b/components/crash/core/common/objc_zombie.mm
@@ -0,0 +1,434 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "components/crash/core/common/objc_zombie.h"
+
+#include <AvailabilityMacros.h>
+#include <string.h>
+
+#include <execinfo.h>
+#import <objc/runtime.h>
+
+#include <algorithm>
+
+#include "base/debug/stack_trace.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+#include "components/crash/core/common/crash_key.h"
+
+// Deallocated objects are re-classed as |CrZombie|. No superclass
+// because then the class would have to override many/most of the
+// inherited methods (|NSObject| is like a category magnet!).
+// Without the __attribute__, clang's -Wobjc-root-class warns on the missing
+// superclass.
+__attribute__((objc_root_class))
+@interface CrZombie {
+ Class isa;
+}
+@end
+
+// Objects with enough space are made into "fat" zombies, which
+// directly remember which class they were until reallocated.
+@interface CrFatZombie : CrZombie {
+ @public
+ Class wasa;
+}
+@end
+
+namespace {
+
+// The depth of backtrace to store with zombies. This directly influences
+// the amount of memory required to track zombies, so should be kept as
+// small as is useful. Unfortunately, too small and it won't poke through
+// deep autorelease and event loop stacks.
+// NOTE(shess): Breakpad currently restricts values to 255 bytes. The
+// trace is hex-encoded with "0x" prefix and " " separators, meaning
+// the maximum number of 32-bit items which can be encoded is 23.
+const size_t kBacktraceDepth = 20;
+
+// The original implementation for |-[NSObject dealloc]|.
+IMP g_originalDeallocIMP = NULL;
+
+// Classes which freed objects become. |g_fatZombieSize| is the
+// minimum object size which can be made into a fat zombie (which can
+// remember which class it was before free, even after falling off the
+// treadmill).
+Class g_zombieClass = Nil; // cached [CrZombie class]
+Class g_fatZombieClass = Nil; // cached [CrFatZombie class]
+size_t g_fatZombieSize = 0;
+
+// Whether to zombie all freed objects, or only those which return YES
+// from |-shouldBecomeCrZombie|.
+BOOL g_zombieAllObjects = NO;
+
+// Protects |g_zombieCount|, |g_zombieIndex|, and |g_zombies|.
+base::Lock& GetLock() {
+ static auto* lock = new base::Lock();
+ return *lock;
+}
+
+// How many zombies to keep before freeing, and the current head of
+// the circular buffer.
+size_t g_zombieCount = 0;
+size_t g_zombieIndex = 0;
+
+typedef struct {
+ id object; // The zombied object.
+ Class wasa; // Value of |object->isa| before we replaced it.
+ void* trace[kBacktraceDepth]; // Backtrace at point of deallocation.
+ size_t traceDepth; // Actual depth of trace[].
+} ZombieRecord;
+
+ZombieRecord* g_zombies = NULL;
+
+// Replacement |-dealloc| which turns objects into zombies and places
+// them into |g_zombies| to be freed later.
+void ZombieDealloc(id self, SEL _cmd) {
+ // This code should only be called when it is implementing |-dealloc|.
+ DCHECK_EQ(_cmd, @selector(dealloc));
+
+ // Use the original |-dealloc| if the object doesn't wish to be
+ // zombied.
+ if (!g_zombieAllObjects && ![self shouldBecomeCrZombie]) {
+ g_originalDeallocIMP(self, _cmd);
+ return;
+ }
+
+ Class wasa = object_getClass(self);
+ const size_t size = class_getInstanceSize(wasa);
+
+ // Destroy the instance by calling C++ destructors and clearing it
+ // to something unlikely to work well if someone references it.
+ // NOTE(shess): |object_dispose()| will call this again when the
+ // zombie falls off the treadmill! But by then |isa| will be a
+ // class without C++ destructors or associative references, so it
+ // won't hurt anything.
+ objc_destructInstance(self);
+ memset(self, '!', size);
+
+ // If the instance is big enough, make it into a fat zombie and have
+ // it remember the old |isa|. Otherwise make it a regular zombie.
+ // Setting |isa| rather than using |object_setClass()| because that
+ // function is implemented with a memory barrier. The runtime's
+ // |_internal_object_dispose()| (in objc-class.m) does this, so it
+ // should be safe (messaging free'd objects shouldn't be expected to
+ // be thread-safe in the first place).
+#pragma clang diagnostic push // clang warns about direct access to isa.
+#pragma clang diagnostic ignored "-Wdeprecated-objc-isa-usage"
+ if (size >= g_fatZombieSize) {
+ self->isa = g_fatZombieClass;
+ static_cast<CrFatZombie*>(self)->wasa = wasa;
+ } else {
+ self->isa = g_zombieClass;
+ }
+#pragma clang diagnostic pop
+
+ // The new record to swap into |g_zombies|. If |g_zombieCount| is
+ // zero, then |self| will be freed immediately.
+ ZombieRecord zombieToFree = {self, wasa};
+ zombieToFree.traceDepth =
+ std::max(backtrace(zombieToFree.trace, kBacktraceDepth), 0);
+
+ // Don't involve the lock when creating zombies without a treadmill.
+ if (g_zombieCount > 0) {
+ base::AutoLock pin(GetLock());
+
+ // Check the count again in a thread-safe manner.
+ if (g_zombieCount > 0) {
+ // Put the current object on the treadmill and keep the previous
+ // occupant.
+ std::swap(zombieToFree, g_zombies[g_zombieIndex]);
+
+ // Bump the index forward.
+ g_zombieIndex = (g_zombieIndex + 1) % g_zombieCount;
+ }
+ }
+
+ // Do the free out here to prevent any chance of deadlock.
+ if (zombieToFree.object)
+ object_dispose(zombieToFree.object);
+}
+
+// Search the treadmill for |object| and fill in |*record| if found.
+// Returns YES if found.
+BOOL GetZombieRecord(id object, ZombieRecord* record) {
+ // Holding the lock is reasonable because this should be fast, and
+ // the process is going to crash presently anyhow.
+ base::AutoLock pin(GetLock());
+ for (size_t i = 0; i < g_zombieCount; ++i) {
+ if (g_zombies[i].object == object) {
+ *record = g_zombies[i];
+ return YES;
+ }
+ }
+ return NO;
+}
+
+// Dump the symbols. This is pulled out into a function to make it
+// easy to use DCHECK to dump only in debug builds.
+BOOL DumpDeallocTrace(const void* const* array, int size) {
+ // Async-signal safe version of fputs, consistent with StackTrace::Print().
+ const char message[] = "Backtrace from -dealloc:\n";
+ ignore_result(HANDLE_EINTR(write(STDERR_FILENO, message, strlen(message))));
+ base::debug::StackTrace(array, size).Print();
+
+ return YES;
+}
+
+// Log a message to a freed object. |wasa| is the object's original
+// class. |aSelector| is the selector which the calling code was
+// attempting to send. |viaSelector| is the selector of the
+// dispatch-related method which is being invoked to send |aSelector|
+// (for instance, -respondsToSelector:).
+void ZombieObjectCrash(id object, SEL aSelector, SEL viaSelector) {
+ ZombieRecord record;
+ BOOL found = GetZombieRecord(object, &record);
+
+ // The object's class can be in the zombie record, but if that is
+ // not available it can also be in the object itself (in most cases).
+ Class wasa = Nil;
+ if (found) {
+ wasa = record.wasa;
+ } else if (object_getClass(object) == g_fatZombieClass) {
+ wasa = static_cast<CrFatZombie*>(object)->wasa;
+ }
+ const char* wasaName = (wasa ? class_getName(wasa) : "<unknown>");
+
+ std::string aString = base::StringPrintf("Zombie <%s: %p> received -%s",
+ wasaName, object, sel_getName(aSelector));
+ if (viaSelector != NULL) {
+ const char* viaName = sel_getName(viaSelector);
+ base::StringAppendF(&aString, " (via -%s)", viaName);
+ }
+
+ // Set a value for breakpad to report.
+ static crash_reporter::CrashKeyString<256> zombie_key("zombie");
+ zombie_key.Set(aString);
+
+ // Encode trace into a breakpad key.
+ static crash_reporter::CrashKeyString<1024> zombie_trace_key(
+ "zombie_dealloc_bt");
+ if (found) {
+ crash_reporter::SetCrashKeyStringToStackTrace(
+ &zombie_trace_key,
+ base::debug::StackTrace(record.trace, record.traceDepth));
+ }
+
+ // Log -dealloc backtrace in debug builds then crash with a useful
+ // stack trace.
+ if (found && record.traceDepth) {
+ DCHECK(DumpDeallocTrace(record.trace, record.traceDepth));
+ } else {
+ DLOG(WARNING) << "Unable to generate backtrace from -dealloc.";
+ }
+ DLOG(FATAL) << aString;
+
+ // This is how about:crash is implemented. Used here instead of
+ // |base::debug::BreakDebugger()| or |LOG(FATAL)| to make the top of the
+ // stack more immediately obvious in crash dumps.
+ int* zero = NULL;
+ *zero = 0;
+}
+
+// Initialize our globals, returning YES on success.
+BOOL ZombieInit() {
+ static BOOL initialized = NO;
+ if (initialized)
+ return YES;
+
+ Class rootClass = [NSObject class];
+ g_originalDeallocIMP =
+ class_getMethodImplementation(rootClass, @selector(dealloc));
+ // objc_getClass() so CrZombie doesn't need +class.
+ g_zombieClass = objc_getClass("CrZombie");
+ g_fatZombieClass = objc_getClass("CrFatZombie");
+ g_fatZombieSize = class_getInstanceSize(g_fatZombieClass);
+
+ if (!g_originalDeallocIMP || !g_zombieClass || !g_fatZombieClass)
+ return NO;
+
+ initialized = YES;
+ return YES;
+}
+
+} // namespace
+
+@implementation CrZombie
+
+// The Objective-C runtime needs to be able to call this successfully.
++ (void)initialize {
+}
+
+// Any method not explicitly defined will end up here, forcing a
+// crash.
+- (id)forwardingTargetForSelector:(SEL)aSelector {
+ ZombieObjectCrash(self, aSelector, NULL);
+ return nil;
+}
+
+// Override a few methods often used for dynamic dispatch to log the
+// message the caller is attempting to send, rather than the utility
+// method being used to send it.
+- (BOOL)respondsToSelector:(SEL)aSelector {
+ ZombieObjectCrash(self, aSelector, _cmd);
+ return NO;
+}
+
+- (id)performSelector:(SEL)aSelector {
+ ZombieObjectCrash(self, aSelector, _cmd);
+ return nil;
+}
+
+- (id)performSelector:(SEL)aSelector withObject:(id)anObject {
+ ZombieObjectCrash(self, aSelector, _cmd);
+ return nil;
+}
+
+- (id)performSelector:(SEL)aSelector
+ withObject:(id)anObject
+ withObject:(id)anotherObject {
+ ZombieObjectCrash(self, aSelector, _cmd);
+ return nil;
+}
+
+- (void)performSelector:(SEL)aSelector
+ withObject:(id)anArgument
+ afterDelay:(NSTimeInterval)delay {
+ ZombieObjectCrash(self, aSelector, _cmd);
+}
+
+@end
+
+@implementation CrFatZombie
+
+// This implementation intentionally left empty.
+
+@end
+
+@implementation NSObject (CrZombie)
+
+- (BOOL)shouldBecomeCrZombie {
+ return NO;
+}
+
+@end
+
+namespace ObjcEvilDoers {
+
+bool ZombieEnable(bool zombieAllObjects,
+ size_t zombieCount) {
+ // Only allow enable/disable on the main thread, just to keep things
+ // simple.
+ DCHECK([NSThread isMainThread]);
+
+ if (!ZombieInit())
+ return false;
+
+ g_zombieAllObjects = zombieAllObjects;
+
+ // Replace the implementation of -[NSObject dealloc].
+ Method m = class_getInstanceMethod([NSObject class], @selector(dealloc));
+ if (!m)
+ return false;
+
+ const IMP prevDeallocIMP = method_setImplementation(m, (IMP)ZombieDealloc);
+ DCHECK(prevDeallocIMP == g_originalDeallocIMP ||
+ prevDeallocIMP == (IMP)ZombieDealloc);
+
+ // Grab the current set of zombies. This is thread-safe because
+ // only the main thread can change these.
+ const size_t oldCount = g_zombieCount;
+ ZombieRecord* oldZombies = g_zombies;
+
+ {
+ base::AutoLock pin(GetLock());
+
+ // Save the old index in case zombies need to be transferred.
+ size_t oldIndex = g_zombieIndex;
+
+ // Create the new zombie treadmill, disabling zombies in case of
+ // failure.
+ g_zombieIndex = 0;
+ g_zombieCount = zombieCount;
+ g_zombies = NULL;
+ if (g_zombieCount) {
+ g_zombies =
+ static_cast<ZombieRecord*>(calloc(g_zombieCount, sizeof(*g_zombies)));
+ if (!g_zombies) {
+ NOTREACHED();
+ g_zombies = oldZombies;
+ g_zombieCount = oldCount;
+ g_zombieIndex = oldIndex;
+ ZombieDisable();
+ return false;
+ }
+ }
+
+ // If the count is changing, allow some of the zombies to continue
+ // shambling forward.
+ const size_t sharedCount = std::min(oldCount, zombieCount);
+ if (sharedCount) {
+ // Get index of the first shared zombie.
+ oldIndex = (oldIndex + oldCount - sharedCount) % oldCount;
+
+ for (; g_zombieIndex < sharedCount; ++ g_zombieIndex) {
+ DCHECK_LT(g_zombieIndex, g_zombieCount);
+ DCHECK_LT(oldIndex, oldCount);
+ std::swap(g_zombies[g_zombieIndex], oldZombies[oldIndex]);
+ oldIndex = (oldIndex + 1) % oldCount;
+ }
+ g_zombieIndex %= g_zombieCount;
+ }
+ }
+
+ // Free the old treadmill and any remaining zombies.
+ if (oldZombies) {
+ for (size_t i = 0; i < oldCount; ++i) {
+ if (oldZombies[i].object)
+ object_dispose(oldZombies[i].object);
+ }
+ free(oldZombies);
+ }
+
+ return true;
+}
+
+void ZombieDisable() {
+ // Only allow enable/disable on the main thread, just to keep things
+ // simple.
+ DCHECK([NSThread isMainThread]);
+
+ // |ZombieInit()| was never called.
+ if (!g_originalDeallocIMP)
+ return;
+
+ // Put back the original implementation of -[NSObject dealloc].
+ Method m = class_getInstanceMethod([NSObject class], @selector(dealloc));
+ DCHECK(m);
+ method_setImplementation(m, g_originalDeallocIMP);
+
+ // Can safely grab this because it only happens on the main thread.
+ const size_t oldCount = g_zombieCount;
+ ZombieRecord* oldZombies = g_zombies;
+
+ {
+ base::AutoLock pin(GetLock()); // In case any -dealloc are in progress.
+ g_zombieCount = 0;
+ g_zombies = NULL;
+ }
+
+ // Free any remaining zombies.
+ if (oldZombies) {
+ for (size_t i = 0; i < oldCount; ++i) {
+ if (oldZombies[i].object)
+ object_dispose(oldZombies[i].object);
+ }
+ free(oldZombies);
+ }
+}
+
+} // namespace ObjcEvilDoers
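The zombie "treadmill" implemented above is a fixed-capacity circular buffer: each freed object is swapped into the slot at g_zombieIndex under the lock, and the displaced occupant is disposed of after the lock is released. A standalone sketch of that eviction pattern, using hypothetical names (Treadmill, Push) that do not appear in this patch:

    // Illustrative sketch only -- not part of this patch.
    #include <cstddef>
    #include <utility>
    #include <vector>

    template <typename T>
    class Treadmill {
     public:
      explicit Treadmill(size_t capacity) : slots_(capacity) {}

      // Stores |item| in the current slot and returns whatever it displaced,
      // so the caller can release it outside any lock (as ZombieDealloc does).
      // With zero capacity the item is returned immediately, mirroring the
      // g_zombieCount == 0 case.
      T Push(T item) {
        if (slots_.empty())
          return item;
        std::swap(item, slots_[index_]);
        index_ = (index_ + 1) % slots_.size();
        return item;
      }

     private:
      std::vector<T> slots_;
      size_t index_ = 0;
    };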
diff --git a/components/crash/core/common/objc_zombie_unittest.mm b/components/crash/core/common/objc_zombie_unittest.mm
new file mode 100644
index 0000000..812cff8
--- /dev/null
+++ b/components/crash/core/common/objc_zombie_unittest.mm
@@ -0,0 +1,98 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Foundation/Foundation.h>
+#include <objc/runtime.h>
+
+#include "base/logging.h"
+#import "base/mac/scoped_nsobject.h"
+#import "components/crash/core/common/objc_zombie.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+@interface ZombieCxxDestructTest : NSObject
+{
+ base::scoped_nsobject<id> aRef_;
+}
+- (id)initWith:(id)anObject;
+@end
+
+@implementation ZombieCxxDestructTest
+- (id)initWith:(id)anObject {
+ self = [super init];
+ if (self) {
+ aRef_.reset([anObject retain]);
+ }
+ return self;
+}
+@end
+
+@interface ZombieAssociatedObjectTest : NSObject
+- (id)initWithAssociatedObject:(id)anObject;
+@end
+
+@implementation ZombieAssociatedObjectTest
+
+- (id)initWithAssociatedObject:(id)anObject {
+ if ((self = [super init])) {
+ // The address of the variable itself is the unique key, the
+ // contents don't matter.
+ static char kAssociatedObjectKey = 'x';
+ objc_setAssociatedObject(
+ self, &kAssociatedObjectKey, anObject, OBJC_ASSOCIATION_RETAIN);
+ }
+ return self;
+}
+
+@end
+
+namespace {
+
+// Verify that the C++ destructors run when the last reference to the
+// object is released.
+// NOTE(shess): To test the negative, comment out the |objc_destructInstance()|
+// call in |ZombieDealloc()|.
+TEST(ObjcZombieTest, CxxDestructors) {
+ base::scoped_nsobject<id> anObject([[NSObject alloc] init]);
+ EXPECT_EQ(1u, [anObject retainCount]);
+
+ ASSERT_TRUE(ObjcEvilDoers::ZombieEnable(YES, 100));
+
+ base::scoped_nsobject<ZombieCxxDestructTest> soonInfected(
+ [[ZombieCxxDestructTest alloc] initWith:anObject]);
+ EXPECT_EQ(2u, [anObject retainCount]);
+
+ // When |soonInfected| becomes a zombie, the C++ destructors should
+ // run and release a reference to |anObject|.
+ soonInfected.reset();
+ EXPECT_EQ(1u, [anObject retainCount]);
+
+ // The local reference should remain (C++ destructors aren't re-run).
+ ObjcEvilDoers::ZombieDisable();
+ EXPECT_EQ(1u, [anObject retainCount]);
+}
+
+// Verify that the associated objects are released when the object is
+// released.
+TEST(ObjcZombieTest, AssociatedObjectsReleased) {
+ base::scoped_nsobject<id> anObject([[NSObject alloc] init]);
+ EXPECT_EQ(1u, [anObject retainCount]);
+
+ ASSERT_TRUE(ObjcEvilDoers::ZombieEnable(YES, 100));
+
+ base::scoped_nsobject<ZombieAssociatedObjectTest> soonInfected(
+ [[ZombieAssociatedObjectTest alloc] initWithAssociatedObject:anObject]);
+ EXPECT_EQ(2u, [anObject retainCount]);
+
+ // When |soonInfected| becomes a zombie, the associated object
+ // should be released.
+ soonInfected.reset();
+ EXPECT_EQ(1u, [anObject retainCount]);
+
+ // The local reference should remain (associated objects not re-released).
+ ObjcEvilDoers::ZombieDisable();
+ EXPECT_EQ(1u, [anObject retainCount]);
+}
+
+} // namespace
diff --git a/components/metrics/BUILD.gn b/components/metrics/BUILD.gn
new file mode 100644
index 0000000..e17fd20
--- /dev/null
+++ b/components/metrics/BUILD.gn
@@ -0,0 +1,473 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO(b/283275484): Remove this once test.gni is stubbed for Cobalt.
+if (!use_cobalt_customizations) {
+ import("//testing/test.gni")
+}
+
+static_library("metrics") {
+ sources = [
+ "call_stack_profile_metrics_provider.cc",
+ "call_stack_profile_metrics_provider.h",
+ "clean_exit_beacon.cc",
+ "clean_exit_beacon.h",
+ "client_info.cc",
+ "client_info.h",
+ "cloned_install_detector.cc",
+ "cloned_install_detector.h",
+ "daily_event.cc",
+ "daily_event.h",
+ "data_use_tracker.cc",
+ "data_use_tracker.h",
+ "delegating_provider.cc",
+ "delegating_provider.h",
+ "drive_metrics_provider.cc",
+ "drive_metrics_provider.h",
+ "drive_metrics_provider_android.cc",
+ "drive_metrics_provider_ios.mm",
+ "drive_metrics_provider_linux.cc",
+ "drive_metrics_provider_mac.mm",
+ "drive_metrics_provider_win.cc",
+ "enabled_state_provider.cc",
+ "enabled_state_provider.h",
+ "environment_recorder.cc",
+ "environment_recorder.h",
+ "execution_phase.cc",
+ "execution_phase.h",
+ "expired_histogram_util.cc",
+ "expired_histogram_util.h",
+ "expired_histograms_checker.cc",
+ "expired_histograms_checker.h",
+ "field_trials_provider.cc",
+ "field_trials_provider.h",
+ "file_metrics_provider.cc",
+ "file_metrics_provider.h",
+ "histogram_encoder.cc",
+ "histogram_encoder.h",
+ "log_decoder.cc",
+ "log_decoder.h",
+ "log_store.h",
+ "machine_id_provider.h",
+ "machine_id_provider_stub.cc",
+ "machine_id_provider_win.cc",
+ "metrics_log.cc",
+ "metrics_log.h",
+ "metrics_log_manager.cc",
+ "metrics_log_manager.h",
+ "metrics_log_store.cc",
+ "metrics_log_store.h",
+ "metrics_log_uploader.h",
+ "metrics_pref_names.cc",
+ "metrics_pref_names.h",
+ "metrics_provider.cc",
+ "metrics_provider.h",
+ "metrics_reporting_default_state.cc",
+ "metrics_reporting_default_state.h",
+ "metrics_reporting_service.cc",
+ "metrics_reporting_service.h",
+ "metrics_rotation_scheduler.cc",
+ "metrics_rotation_scheduler.h",
+ "metrics_scheduler.cc",
+ "metrics_scheduler.h",
+ "metrics_service.cc",
+ "metrics_service.h",
+ "metrics_service_accessor.cc",
+ "metrics_service_accessor.h",
+ "metrics_service_client.cc",
+ "metrics_service_client.h",
+ "metrics_state_manager.cc",
+ "metrics_state_manager.h",
+ "metrics_switches.cc",
+ "metrics_switches.h",
+ "metrics_upload_scheduler.cc",
+ "metrics_upload_scheduler.h",
+ "persisted_logs.cc",
+ "persisted_logs.h",
+ "persisted_logs_metrics.h",
+ "persisted_logs_metrics_impl.cc",
+ "persisted_logs_metrics_impl.h",
+ "persistent_system_profile.cc",
+ "persistent_system_profile.h",
+ "reporting_service.cc",
+ "reporting_service.h",
+ "stability_metrics_helper.cc",
+ "stability_metrics_helper.h",
+ "stability_metrics_provider.cc",
+ "stability_metrics_provider.h",
+ "system_memory_stats_recorder.h",
+ "system_memory_stats_recorder_linux.cc",
+ "system_memory_stats_recorder_win.cc",
+ "system_session_analyzer_win.cc",
+ "system_session_analyzer_win.h",
+ "url_constants.cc",
+ "url_constants.h",
+ "version_utils.cc",
+ "version_utils.h",
+ ]
+
+ public_deps = [
+ "//third_party/metrics_proto",
+ ]
+
+ deps = [
+ "//base",
+ "//base:base_static",
+ "//components/prefs",
+ "//components/variations",
+ "//components/version_info:version_info",
+ "//extensions/buildflags",
+ "//third_party/zlib/google:compression_utils",
+ ]
+
+ if (use_cobalt_customizations) {
+ sources -= [
+ # All code should be OS agnostic above SB.
+ "drive_metrics_provider_android.cc",
+ "drive_metrics_provider_ios.mm",
+ "drive_metrics_provider_linux.cc",
+ "drive_metrics_provider_mac.mm",
+ "drive_metrics_provider_win.cc",
+ # Relies on too much functionality that does not exist above SB (e.g.,
+ # base::TouchFile, memory-mapped files, etc.).
+ "file_metrics_provider.cc",
+ "file_metrics_provider.h",
+ # All code should be OS agnostic above SB.
+ "machine_id_provider_win.cc",
+ "system_memory_stats_recorder_linux.cc",
+ "system_memory_stats_recorder_win.cc",
+ "system_session_analyzer_win.cc",
+ "system_session_analyzer_win.h",
+ ]
+ }
+
+ if (is_chromeos) {
+ deps += [ ":serialization" ]
+ }
+
+ if (is_mac) {
+ libs = [
+ # The below are all needed for drive_metrics_provider_mac.mm.
+ "CoreFoundation.framework",
+ "DiskArbitration.framework",
+ "Foundation.framework",
+ "IOKit.framework",
+ ]
+ }
+
+ if (is_win) {
+ sources -= [ "machine_id_provider_stub.cc" ]
+ deps += [ "//components/browser_watcher:stability_client" ]
+ libs = [ "wevtapi.lib" ]
+ }
+
+ if (is_fuchsia) {
+ sources += [ "drive_metrics_provider_fuchsia.cc" ]
+ }
+}
+
+if (!use_cobalt_customizations) {
+ # The component metrics provider is a separate target because it depends upon
+ # (the large) component_updater code, and is not needed for some entities that
+ # depend on :metrics.
+ static_library("component_metrics") {
+ sources = [
+ "component_metrics_provider.cc",
+ "component_metrics_provider.h",
+ ]
+
+ public_deps = [
+ "//third_party/metrics_proto",
+ ]
+
+ deps = [
+ ":metrics",
+ "//base",
+ "//components/component_updater",
+ ]
+ }
+}
+
+if (!is_ios && !use_cobalt_customizations) {
+ static_library("gpu") {
+ sources = [
+ "gpu/gpu_metrics_provider.cc",
+ "gpu/gpu_metrics_provider.h",
+ ]
+
+ public_deps = [
+ ":metrics",
+ ]
+ deps = [
+ "//base",
+ "//content/public/browser",
+ "//gpu/config",
+ ]
+ }
+}
+
+if (!use_cobalt_customizations) {
+ static_library("net") {
+ sources = [
+ "net/cellular_logic_helper.cc",
+ "net/cellular_logic_helper.h",
+ "net/net_metrics_log_uploader.cc",
+ "net/net_metrics_log_uploader.h",
+ "net/network_metrics_provider.cc",
+ "net/network_metrics_provider.h",
+ "net/wifi_access_point_info_provider.cc",
+ "net/wifi_access_point_info_provider.h",
+ ]
+
+ public_deps = [
+ ":metrics",
+ ]
+ allow_circular_includes_from = [ ":metrics" ]
+
+ deps = [
+ "//base",
+ "//components/data_use_measurement/core",
+ "//components/encrypted_messages:encrypted_message_proto",
+ "//components/encrypted_messages:encrypted_messages",
+ "//components/variations",
+ "//net",
+ "//services/network/public/cpp:cpp",
+ "//third_party/metrics_proto",
+ "//third_party/zlib/google:compression_utils",
+ "//url",
+ ]
+
+ if (is_chromeos) {
+ sources += [
+ "net/wifi_access_point_info_provider_chromeos.cc",
+ "net/wifi_access_point_info_provider_chromeos.h",
+ ]
+ deps += [ "//chromeos" ]
+ }
+ }
+
+ static_library("ui") {
+ sources = [
+ "ui/screen_info_metrics_provider.cc",
+ "ui/screen_info_metrics_provider.h",
+ ]
+
+ public_deps = [
+ ":metrics",
+ ]
+ deps = [
+ "//base",
+ "//ui/display",
+ "//ui/gfx",
+ "//ui/gfx/geometry",
+ ]
+ }
+
+ static_library("single_sample_metrics") {
+ sources = [
+ "single_sample_metrics.cc",
+ "single_sample_metrics.h",
+ "single_sample_metrics_factory_impl.cc",
+ "single_sample_metrics_factory_impl.h",
+ ]
+
+ deps = [
+ "//mojo/public/cpp/bindings",
+ "//services/service_manager/public/cpp",
+ "//services/service_manager/public/mojom",
+ ]
+
+ public_deps = [
+ "//components/metrics/public/interfaces:single_sample_metrics_mojo_bindings",
+ ]
+ }
+
+
+ source_set("call_stack_profile_params") {
+ public = [
+ "call_stack_profile_encoding.h",
+ "call_stack_profile_params.h",
+ ]
+ sources = [
+ "call_stack_profile_encoding.cc",
+ ]
+
+ deps = [
+ "//base:base",
+ "//third_party/metrics_proto",
+ ]
+ }
+
+ # Dependency for child processes that use the CallStackProfileBuilder.
+ source_set("child_call_stack_profile_builder") {
+ public = [
+ "call_stack_profile_builder.h",
+ "child_call_stack_profile_collector.h",
+ ]
+ sources = [
+ "call_stack_profile_builder.cc",
+ "child_call_stack_profile_collector.cc",
+ ]
+ public_deps = [
+ ":call_stack_profile_params",
+ ]
+ deps = [
+ "//base",
+ "//components/metrics/public/interfaces:call_stack_mojo_bindings",
+ "//third_party/metrics_proto",
+ ]
+
+ # This target must not depend on :metrics because that code is intended solely
+ # for use in the browser process.
+ assert_no_deps = [ ":metrics" ]
+ }
+
+ # Dependency for browser process use of the CallStackProfileBuilder.
+ source_set("call_stack_profile_builder") {
+ deps = [
+ ":metrics",
+ ]
+ public_deps = [
+ ":child_call_stack_profile_builder",
+ ]
+ }
+
+ # The browser process mojo service for collecting profiles from child
+ # processes.
+ source_set("call_stack_profile_collector") {
+ sources = [
+ "call_stack_profile_collector.cc",
+ "call_stack_profile_collector.h",
+ ]
+ deps = [
+ ":call_stack_profile_params",
+ ":metrics",
+ "//components/metrics/public/interfaces:call_stack_mojo_bindings",
+ ]
+ }
+}
+
+static_library("test_support") {
+ testonly = true
+ sources = [
+ "test_enabled_state_provider.cc",
+ "test_enabled_state_provider.h",
+ "test_metrics_log_uploader.cc",
+ "test_metrics_log_uploader.h",
+ "test_metrics_provider.cc",
+ "test_metrics_provider.h",
+ "test_metrics_service_client.cc",
+ "test_metrics_service_client.h",
+ ]
+
+ public_deps = [
+ ":metrics",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
+if (is_linux) {
+ static_library("serialization") {
+ sources = [
+ "serialization/metric_sample.cc",
+ "serialization/metric_sample.h",
+ "serialization/serialization_utils.cc",
+ "serialization/serialization_utils.h",
+ ]
+ deps = [
+ "//base",
+ ]
+ }
+}
+# TODO(b/283275474): Re-enable as many of these tests as possible.
+if (!use_cobalt_customizations) {
+ source_set("unit_tests") {
+ testonly = true
+ sources = [
+ "call_stack_profile_builder_unittest.cc",
+ "call_stack_profile_metrics_provider_unittest.cc",
+ "child_call_stack_profile_collector_unittest.cc",
+ "cloned_install_detector_unittest.cc",
+ "component_metrics_provider_unittest.cc",
+ "daily_event_unittest.cc",
+ "data_use_tracker_unittest.cc",
+ "drive_metrics_provider_unittest.cc",
+ "environment_recorder_unittest.cc",
+ "expired_histograms_checker_unittest.cc",
+ "field_trials_provider_unittest.cc",
+ "file_metrics_provider_unittest.cc",
+ "histogram_encoder_unittest.cc",
+ "machine_id_provider_win_unittest.cc",
+ "metrics_log_manager_unittest.cc",
+ "metrics_log_store_unittest.cc",
+ "metrics_log_unittest.cc",
+ "metrics_service_unittest.cc",
+ "metrics_state_manager_unittest.cc",
+ "net/net_metrics_log_uploader_unittest.cc",
+ "net/network_metrics_provider_unittest.cc",
+ "persisted_logs_unittest.cc",
+ "persistent_system_profile_unittest.cc",
+ "reporting_service_unittest.cc",
+ "single_sample_metrics_factory_impl_unittest.cc",
+ "stability_metrics_helper_unittest.cc",
+ "stability_metrics_provider_unittest.cc",
+ "system_session_analyzer_win_unittest.cc",
+ "ui/screen_info_metrics_provider_unittest.cc",
+ ]
+
+ deps = [
+ ":call_stack_profile_builder",
+ ":component_metrics",
+ ":metrics",
+ ":net",
+ ":single_sample_metrics",
+ ":test_support",
+ ":ui",
+ "//base/test:test_support",
+ "//components/component_updater:test_support",
+ "//components/encrypted_messages:encrypted_message_proto",
+ "//components/metrics/public/cpp:call_stack_unit_tests",
+ "//components/prefs:test_support",
+ "//components/variations",
+ "//extensions/buildflags",
+ "//mojo/public/cpp/bindings",
+ "//net:test_support",
+ "//services/network:test_support",
+ "//services/network/public/cpp:cpp",
+ "//services/service_manager/public/cpp",
+ "//testing/gtest",
+ "//third_party/zlib/google:compression_utils",
+ "//ui/gfx/geometry",
+ ]
+
+ if (is_linux) {
+ sources += [ "serialization/serialization_utils_unittest.cc" ]
+ deps += [ ":serialization" ]
+ }
+
+ if (is_chromeos) {
+ deps += [ "//chromeos" ]
+ }
+
+ # iOS is not supported by the profiler and the ios-simulator bot chokes on
+ # these tests.
+ if (is_ios) {
+ sources -= [ "child_call_stack_profile_collector_unittest.cc" ]
+ deps -= [ "//components/metrics/public/cpp:call_stack_unit_tests" ]
+ }
+ }
+
+ # Convenience testing target
+ test("metrics_unittests") {
+ sources = [
+ "//components/test/run_all_unittests.cc",
+ ]
+ deps = [
+ ":unit_tests",
+ "//components/test:test_support",
+ ]
+ }
+}
diff --git a/components/metrics/DEPS b/components/metrics/DEPS
new file mode 100644
index 0000000..1457333
--- /dev/null
+++ b/components/metrics/DEPS
@@ -0,0 +1,19 @@
+# This component is shared with the Chrome OS build, so it's important to limit
+# dependencies to a minimal set.
+include_rules = [
+ "-components",
+ "+components/browser_watcher",
+ "+components/component_updater",
+ "+components/compression",
+ "+components/metrics",
+ "+components/prefs",
+ "+components/variations",
+ "+components/version_info",
+ "+content/public/test",
+ "+extensions/buildflags",
+ "+mojo/public/cpp",
+ "+services/service_manager/public/cpp",
+ "+third_party/metrics_proto",
+ "+third_party/zlib/google",
+ "-net",
+]
diff --git a/components/metrics/METADATA b/components/metrics/METADATA
new file mode 100644
index 0000000..207ab02
--- /dev/null
+++ b/components/metrics/METADATA
@@ -0,0 +1,16 @@
+name: "metrics"
+description:
+ "Subtree at components/metrics."
+
+third_party {
+ url {
+ type: GIT
+ value: "https://chromium.googlesource.com/chromium/src/components/metrics"
+ }
+ version: "734293972a4b3f6d949e489ac02145538fb59c70"
+ last_upgrade_date {
+ year: 2018
+ month: 08
+ day: 30
+ }
+}
diff --git a/components/metrics/OWNERS b/components/metrics/OWNERS
new file mode 100644
index 0000000..68c3f9c
--- /dev/null
+++ b/components/metrics/OWNERS
@@ -0,0 +1,5 @@
+file://base/metrics/OWNERS
+
+per-file *call_stack_profile*=wittman@chromium.org
+
+# COMPONENT: Internals>Metrics
diff --git a/components/metrics/README b/components/metrics/README
new file mode 100644
index 0000000..5a2abbb
--- /dev/null
+++ b/components/metrics/README
@@ -0,0 +1,23 @@
+This component contains the base classes for the metrics service and only
+depends on //base. It is used by ChromeOS as the base for a standalone service
+that will upload the metrics when ChromeOS is not installed (headless install).
+
+This is the first step towards the componentization of metrics that will happen
+later this spring.
+
+A proposed structure for the metrics component is:
+//components/metrics/base
+ Depends on base only. Contains the protobuf definitions.
+//components/metrics/core
+ Depends on everything iOS depends on.
+//components/metrics/content
+ Depends on content.
+
+Ideally, the component would abstract the network stack and have a clean
+separation between the metrics upload logic (protobuf generation, retry, etc.)
+and the Chrome-specific part (gathering histograms from all the threads,
+populating the log with hardware characteristics, plugin state, etc.).
+
+It is a plus if the code currently in the component (i.e., the code that can
+depend only on //base) stays in a single directory as it would be easier
+for ChromeOS to pull it :).
diff --git a/components/metrics/call_stack_profile_builder.cc b/components/metrics/call_stack_profile_builder.cc
new file mode 100644
index 0000000..7b2890f
--- /dev/null
+++ b/components/metrics/call_stack_profile_builder.cc
@@ -0,0 +1,346 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/call_stack_profile_builder.h"
+
+#include <string>
+#include <utility>
+
+#include "base/atomicops.h"
+#include "base/files/file_path.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/no_destructor.h"
+#include "base/stl_util.h"
+#include "components/metrics/call_stack_profile_encoding.h"
+
+namespace metrics {
+
+namespace {
+
+// Only used by child processes.
+base::LazyInstance<ChildCallStackProfileCollector>::Leaky
+ g_child_call_stack_profile_collector = LAZY_INSTANCE_INITIALIZER;
+
+base::RepeatingCallback<void(base::TimeTicks, SampledProfile)>&
+GetBrowserProcessReceiverCallbackInstance() {
+ static base::NoDestructor<
+ base::RepeatingCallback<void(base::TimeTicks, SampledProfile)>>
+ instance;
+ return *instance;
+}
+
+// Identifies an unknown module.
+const size_t kUnknownModuleIndex = static_cast<size_t>(-1);
+
+// This global variable holds the current system state and is recorded with
+// every captured sample. Sampling is done on a separate thread, which is why
+// updates to this must be atomic. A PostTask to move the updates to that
+// thread would skew the timing, and a lock could result in deadlock if the
+// thread making a change was also being profiled and got stopped.
+static base::subtle::Atomic32 g_process_milestones = 0;
+
+void ChangeAtomicFlags(base::subtle::Atomic32* flags,
+ base::subtle::Atomic32 set,
+ base::subtle::Atomic32 clear) {
+ DCHECK(set != 0 || clear != 0);
+ DCHECK_EQ(0, set & clear);
+
+ base::subtle::Atomic32 bits = base::subtle::NoBarrier_Load(flags);
+ while (true) {
+ base::subtle::Atomic32 existing = base::subtle::NoBarrier_CompareAndSwap(
+ flags, bits, (bits | set) & ~clear);
+ if (existing == bits)
+ break;
+ bits = existing;
+ }
+}
+
+// Provide a mapping from the C++ "enum" definition of various process
+// milestones to the equivalent protobuf "enum" definition. This table-lookup
+// conversion allows for the implementation to evolve and still be compatible
+// with the protobuf -- even if there are ever more than 32 defined proto
+// values, though never more than 32 could be in-use in a given C++ version
+// of the code.
+const ProcessPhase kProtoPhases[CallStackProfileBuilder::MILESTONES_MAX_VALUE] =
+ {
+ ProcessPhase::MAIN_LOOP_START,
+ ProcessPhase::MAIN_NAVIGATION_START,
+ ProcessPhase::MAIN_NAVIGATION_FINISHED,
+ ProcessPhase::FIRST_NONEMPTY_PAINT,
+
+ ProcessPhase::SHUTDOWN_START,
+};
+
+// These functions are used to encode protobufs. --------------------------
+
+// The protobuf expects the MD5 checksum prefix of the module name.
+uint64_t HashModuleFilename(const base::FilePath& filename) {
+ const base::FilePath::StringType basename = filename.BaseName().value();
+ // Copy the bytes in basename into a string buffer.
+ size_t basename_length_in_bytes =
+ basename.size() * sizeof(base::FilePath::CharType);
+ std::string name_bytes(basename_length_in_bytes, '\0');
+ memcpy(&name_bytes[0], &basename[0], basename_length_in_bytes);
+ return base::HashMetricName(name_bytes);
+}
+
+// Transcode |sample| into |proto_sample|, using base addresses in |modules| to
+// compute module instruction pointer offsets.
+void CopySampleToProto(const CallStackProfileBuilder::Sample& sample,
+ const std::vector<base::ModuleCache::Module>& modules,
+ CallStackProfile::Sample* proto_sample) {
+ for (const auto& frame : sample.frames) {
+ CallStackProfile::Location* location = proto_sample->add_frame();
+ // A frame may not have a valid module. If so, we can't compute the
+ // instruction pointer offset, and we don't want to send bare pointers,
+ // so leave call_stack_entry empty.
+ if (frame.module_index == kUnknownModuleIndex)
+ continue;
+ int64_t module_offset =
+ reinterpret_cast<const char*>(frame.instruction_pointer) -
+ reinterpret_cast<const char*>(modules[frame.module_index].base_address);
+ DCHECK_GE(module_offset, 0);
+ location->set_address(static_cast<uint64_t>(module_offset));
+ location->set_module_id_index(frame.module_index);
+ }
+}
+
+// Transcode Sample annotations into protobuf fields. The C++ code uses a
+// bit-field with each bit corresponding to an entry in an enumeration
+// while the protobuf uses a repeated field of individual values. Conversion
+// tables allow for arbitrary mapping, though no more than 32 in any given
+// version of the code.
+void CopyAnnotationsToProto(uint32_t new_milestones,
+ CallStackProfile::Sample* sample_proto) {
+ for (size_t bit = 0; new_milestones != 0 && bit < sizeof(new_milestones) * 8;
+ ++bit) {
+ const uint32_t flag = 1U << bit;
+ if (new_milestones & flag) {
+ if (bit >= base::size(kProtoPhases)) {
+ NOTREACHED();
+ continue;
+ }
+ sample_proto->add_process_phase(kProtoPhases[bit]);
+ new_milestones ^= flag; // Bit is set so XOR will clear it.
+ }
+ }
+}
+
+} // namespace
+
+// CallStackProfileBuilder::Frame ---------------------------------------------
+
+CallStackProfileBuilder::Frame::Frame(uintptr_t instruction_pointer,
+ size_t module_index)
+ : instruction_pointer(instruction_pointer), module_index(module_index) {}
+
+CallStackProfileBuilder::Frame::~Frame() = default;
+
+CallStackProfileBuilder::Frame::Frame()
+ : instruction_pointer(0), module_index(kUnknownModuleIndex) {}
+
+// CallStackProfileBuilder::Sample --------------------------------------------
+
+CallStackProfileBuilder::Sample::Sample() = default;
+
+CallStackProfileBuilder::Sample::Sample(const Sample& sample) = default;
+
+CallStackProfileBuilder::Sample::~Sample() = default;
+
+CallStackProfileBuilder::Sample::Sample(const Frame& frame) {
+ frames.push_back(std::move(frame));
+}
+
+CallStackProfileBuilder::Sample::Sample(const std::vector<Frame>& frames)
+ : frames(frames) {}
+
+CallStackProfileBuilder::CallStackProfileBuilder(
+ const CallStackProfileParams& profile_params,
+ base::OnceClosure completed_callback)
+ : profile_params_(profile_params),
+ profile_start_time_(base::TimeTicks::Now()) {
+ completed_callback_ = std::move(completed_callback);
+}
+
+CallStackProfileBuilder::~CallStackProfileBuilder() = default;
+
+void CallStackProfileBuilder::RecordAnnotations() {
+ // The code inside this method must not do anything that could acquire a
+ // mutex, including allocating memory (which includes LOG messages) because
+ // that mutex could be held by a stopped thread, thus resulting in deadlock.
+ sample_.process_milestones =
+ base::subtle::NoBarrier_Load(&g_process_milestones);
+}
+
+void CallStackProfileBuilder::OnSampleCompleted(
+ std::vector<base::StackSamplingProfiler::Frame> frames) {
+ // Assemble sample_ from |frames| first.
+ for (const auto& frame : frames) {
+ const base::ModuleCache::Module& module(frame.module);
+ if (!module.is_valid) {
+ sample_.frames.emplace_back(frame.instruction_pointer,
+ kUnknownModuleIndex);
+ continue;
+ }
+
+ // Dedup modules and cache them in modules_.
+ auto loc = module_index_.find(module.base_address);
+ if (loc == module_index_.end()) {
+ modules_.push_back(module);
+ size_t index = modules_.size() - 1;
+ loc = module_index_.insert(std::make_pair(module.base_address, index))
+ .first;
+ }
+ sample_.frames.emplace_back(frame.instruction_pointer, loc->second);
+ }
+
+ // Write CallStackProfile::Sample protocol buffer message based on sample_.
+ int existing_sample_index = -1;
+ auto location = sample_index_.find(sample_);
+ if (location != sample_index_.end())
+ existing_sample_index = location->second;
+
+ if (existing_sample_index != -1) {
+ CallStackProfile::Sample* sample_proto =
+ proto_profile_.mutable_deprecated_sample(existing_sample_index);
+ sample_proto->set_count(sample_proto->count() + 1);
+ return;
+ }
+
+ CallStackProfile::Sample* sample_proto =
+ proto_profile_.add_deprecated_sample();
+ CopySampleToProto(sample_, modules_, sample_proto);
+ sample_proto->set_count(1);
+ CopyAnnotationsToProto(sample_.process_milestones & ~milestones_,
+ sample_proto);
+ milestones_ = sample_.process_milestones;
+
+ sample_index_.insert(std::make_pair(
+ sample_, static_cast<int>(proto_profile_.deprecated_sample_size()) - 1));
+
+ sample_ = Sample();
+}
+
+// Build a SampledProfile in the protocol buffer message format from the
+// collected sampling data. The message is then passed to
+// CallStackProfileMetricsProvider or ChildCallStackProfileCollector.
+
+// A SampledProfile message (third_party/metrics_proto/sampled_profile.proto)
+// contains a CallStackProfile message
+// (third_party/metrics_proto/call_stack_profile.proto) and associated profile
+// parameters (process/thread/trigger event). A CallStackProfile message
+// contains a set of Sample messages and ModuleIdentifier messages, and other
+// sampling information. One Sample corresponds to a single recorded stack, and
+// the ModuleIdentifiers record those modules associated with the recorded stack
+// frames.
+void CallStackProfileBuilder::OnProfileCompleted(
+ base::TimeDelta profile_duration,
+ base::TimeDelta sampling_period) {
+ proto_profile_.set_profile_duration_ms(profile_duration.InMilliseconds());
+ proto_profile_.set_sampling_period_ms(sampling_period.InMilliseconds());
+
+ for (const auto& module : modules_) {
+ CallStackProfile::ModuleIdentifier* module_id =
+ proto_profile_.add_module_id();
+ module_id->set_build_id(module.id);
+ module_id->set_name_md5_prefix(HashModuleFilename(module.filename));
+ }
+
+ // Clear the caches etc.
+ modules_.clear();
+ module_index_.clear();
+ sample_index_.clear();
+
+ // Assemble the SampledProfile protocol buffer message and run the associated
+ // callback to pass it.
+ SampledProfile sampled_profile;
+ CallStackProfile* proto_profile =
+ sampled_profile.mutable_call_stack_profile();
+ *proto_profile = std::move(proto_profile_);
+
+ sampled_profile.set_process(
+ ToExecutionContextProcess(profile_params_.process));
+ sampled_profile.set_thread(ToExecutionContextThread(profile_params_.thread));
+ sampled_profile.set_trigger_event(
+ ToSampledProfileTriggerEvent(profile_params_.trigger));
+
+ PassProfilesToMetricsProvider(std::move(sampled_profile));
+
+ // Run the completed callback if there is one.
+ if (!completed_callback_.is_null())
+ std::move(completed_callback_).Run();
+}
+
+// static
+void CallStackProfileBuilder::SetBrowserProcessReceiverCallback(
+ const base::RepeatingCallback<void(base::TimeTicks, SampledProfile)>&
+ callback) {
+ GetBrowserProcessReceiverCallbackInstance() = callback;
+}
+
+void CallStackProfileBuilder::PassProfilesToMetricsProvider(
+ SampledProfile sampled_profile) {
+ if (profile_params_.process == CallStackProfileParams::BROWSER_PROCESS) {
+ GetBrowserProcessReceiverCallbackInstance().Run(profile_start_time_,
+ std::move(sampled_profile));
+ } else {
+ g_child_call_stack_profile_collector.Get()
+ .ChildCallStackProfileCollector::Collect(profile_start_time_,
+ std::move(sampled_profile));
+ }
+}
+
+// static
+void CallStackProfileBuilder::SetProcessMilestone(int milestone) {
+ DCHECK_LE(0, milestone);
+ DCHECK_GT(static_cast<int>(sizeof(g_process_milestones) * 8), milestone);
+ DCHECK_EQ(0, base::subtle::NoBarrier_Load(&g_process_milestones) &
+ (1 << milestone));
+ ChangeAtomicFlags(&g_process_milestones, 1 << milestone, 0);
+}
+
+// static
+void CallStackProfileBuilder::SetParentProfileCollectorForChildProcess(
+ metrics::mojom::CallStackProfileCollectorPtr browser_interface) {
+ g_child_call_stack_profile_collector.Get().SetParentProfileCollector(
+ std::move(browser_interface));
+}
+
+// These operators permit Sample and Frame values to be compared, so that
+// Samples can be used as keys in a map.
+
+bool operator==(const CallStackProfileBuilder::Sample& a,
+ const CallStackProfileBuilder::Sample& b) {
+ return a.process_milestones == b.process_milestones && a.frames == b.frames;
+}
+
+bool operator!=(const CallStackProfileBuilder::Sample& a,
+ const CallStackProfileBuilder::Sample& b) {
+ return !(a == b);
+}
+
+bool operator<(const CallStackProfileBuilder::Sample& a,
+ const CallStackProfileBuilder::Sample& b) {
+ if (a.process_milestones != b.process_milestones)
+ return a.process_milestones < b.process_milestones;
+
+ return a.frames < b.frames;
+}
+
+bool operator==(const CallStackProfileBuilder::Frame& a,
+ const CallStackProfileBuilder::Frame& b) {
+ return a.instruction_pointer == b.instruction_pointer &&
+ a.module_index == b.module_index;
+}
+
+bool operator<(const CallStackProfileBuilder::Frame& a,
+ const CallStackProfileBuilder::Frame& b) {
+ if (a.module_index != b.module_index)
+ return a.module_index < b.module_index;
+
+ return a.instruction_pointer < b.instruction_pointer;
+}
+
+} // namespace metrics
diff --git a/components/metrics/call_stack_profile_builder.h b/components/metrics/call_stack_profile_builder.h
new file mode 100644
index 0000000..deb2fea
--- /dev/null
+++ b/components/metrics/call_stack_profile_builder.h
@@ -0,0 +1,158 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CALL_STACK_PROFILE_BUILDER_H_
+#define COMPONENTS_METRICS_CALL_STACK_PROFILE_BUILDER_H_
+
+#include <map>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/profiler/stack_sampling_profiler.h"
+#include "base/sampling_heap_profiler/module_cache.h"
+#include "base/time/time.h"
+#include "components/metrics/call_stack_profile_params.h"
+#include "components/metrics/child_call_stack_profile_collector.h"
+#include "third_party/metrics_proto/sampled_profile.pb.h"
+
+namespace metrics {
+
+class SampledProfile;
+
+// An instance of the class is meant to be passed to base::StackSamplingProfiler
+// to collect profiles. The profiles collected are uploaded via the metrics log.
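+//
+// An illustrative usage sketch (the exact wiring to the profiler is an
+// assumption, not part of this file):
+//
+//   metrics::CallStackProfileParams params(
+//       metrics::CallStackProfileParams::BROWSER_PROCESS,
+//       metrics::CallStackProfileParams::MAIN_THREAD,
+//       metrics::CallStackProfileParams::PROCESS_STARTUP);
+//   auto builder =
+//       std::make_unique<metrics::CallStackProfileBuilder>(params);
+//   // The builder is then handed to base::StackSamplingProfiler, which
+//   // invokes OnSampleCompleted() once per recorded stack and
+//   // OnProfileCompleted() when the collection finishes.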
+class CallStackProfileBuilder
+ : public base::StackSamplingProfiler::ProfileBuilder {
+ public:
+ // Frame represents an individual sampled stack frame with module information.
+ struct Frame {
+ Frame(uintptr_t instruction_pointer, size_t module_index);
+ ~Frame();
+
+ // Default constructor to satisfy IPC macros. Do not use explicitly.
+ Frame();
+
+ // The sampled instruction pointer within the function.
+ uintptr_t instruction_pointer;
+
+    // Index of the module in the associated vector of modules. We don't
+ // represent module state directly here to save space.
+ size_t module_index;
+ };
+
+ // Sample represents a set of stack frames with some extra information.
+ struct Sample {
+ Sample();
+ Sample(const Sample& sample);
+ ~Sample();
+
+ // These constructors are used only during testing.
+ Sample(const Frame& frame);
+ Sample(const std::vector<Frame>& frames);
+
+    // The entire stack of frames captured when the sample is taken.
+ std::vector<Frame> frames;
+
+ // A bit-field indicating which process milestones have passed. This can be
+ // used to tell where in the process lifetime the samples are taken. Just
+ // as a "lifetime" can only move forward, these bits mark the milestones of
+    // the process's life as they occur. Bits can be set but never reset. The
+ // actual definition of the individual bits is left to the user of this
+ // module.
+ uint32_t process_milestones = 0;
+ };
+
+  // These milestones of a process lifetime can be passed as process
+  // "milestones" to CallStackProfileBuilder::SetProcessMilestone(). Be sure
+  // to update the translation constants at the top of the .cc file when this
+  // is changed.
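+  //
+  // Note that |process_milestones| is a 32-bit field and
+  // SetProcessMilestone() DCHECKs that the milestone index fits within it,
+  // so at most 32 milestones can be defined.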
+ enum Milestones : int {
+ MAIN_LOOP_START,
+ MAIN_NAVIGATION_START,
+ MAIN_NAVIGATION_FINISHED,
+ FIRST_NONEMPTY_PAINT,
+
+ SHUTDOWN_START,
+
+ MILESTONES_MAX_VALUE
+ };
+
+  // |completed_callback| is run when sampling of a profile completes. Other
+ // threads, including the UI thread, may block on callback completion so this
+ // should run as quickly as possible.
+ //
+ // IMPORTANT NOTE: The callback is invoked on a thread the profiler
+ // constructs, rather than on the thread used to construct the profiler, and
+ // thus the callback must be callable on any thread.
+ CallStackProfileBuilder(
+ const CallStackProfileParams& profile_params,
+ base::OnceClosure completed_callback = base::OnceClosure());
+
+ ~CallStackProfileBuilder() override;
+
+ // base::StackSamplingProfiler::ProfileBuilder:
+ void RecordAnnotations() override;
+ void OnSampleCompleted(
+ std::vector<base::StackSamplingProfiler::Frame> frames) override;
+ void OnProfileCompleted(base::TimeDelta profile_duration,
+ base::TimeDelta sampling_period) override;
+
+ // Sets the callback to use for reporting browser process profiles. This
+ // indirection is required to avoid a dependency on unnecessary metrics code
+ // in child processes.
+ static void SetBrowserProcessReceiverCallback(
+ const base::RepeatingCallback<void(base::TimeTicks, SampledProfile)>&
+ callback);
+
+ // Sets the current system state that is recorded with each captured stack
+ // frame. This is thread-safe so can be called from anywhere. The parameter
+ // value should be from an enumeration of the appropriate type with values
+  // ranging from 0 to 31, inclusive. This sets bits within the
+  // |process_milestones| field of Sample. The actual meanings of these bits
+  // are defined (globally) by the caller(s).
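+  //
+  // For example (illustrative only): after
+  // SetProcessMilestone(MAIN_NAVIGATION_START), samples whose annotations
+  // are recorded via RecordAnnotations() afterwards carry the
+  // (1 << MAIN_NAVIGATION_START) bit in |process_milestones|.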
+ static void SetProcessMilestone(int milestone);
+
+ // Sets the CallStackProfileCollector interface from |browser_interface|.
+ // This function must be called within child processes.
+ static void SetParentProfileCollectorForChildProcess(
+ metrics::mojom::CallStackProfileCollectorPtr browser_interface);
+
+ protected:
+ // Test seam.
+ virtual void PassProfilesToMetricsProvider(SampledProfile sampled_profile);
+
+ private:
+ // The collected stack samples in proto buffer message format.
+ CallStackProfile proto_profile_;
+
+ // The current sample being recorded.
+ Sample sample_;
+
+  // The index of each sample in |proto_profile_|, keyed by the sample.
+  std::map<Sample, int> sample_index_;
+
+  // The index of each module in |modules_|, keyed by the module's base
+  // address.
+  std::map<uintptr_t, size_t> module_index_;
+
+ // The distinct modules in the current profile.
+ std::vector<base::ModuleCache::Module> modules_;
+
+ // The process milestones of a previous sample.
+ uint32_t milestones_ = 0;
+
+ // Callback made when sampling a profile completes.
+ base::OnceClosure completed_callback_;
+
+ // The parameters associated with the sampled profile.
+ const CallStackProfileParams profile_params_;
+
+ // The start time of a profile collection.
+ const base::TimeTicks profile_start_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallStackProfileBuilder);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CALL_STACK_PROFILE_BUILDER_H_
diff --git a/components/metrics/call_stack_profile_builder_unittest.cc b/components/metrics/call_stack_profile_builder_unittest.cc
new file mode 100644
index 0000000..89549c0
--- /dev/null
+++ b/components/metrics/call_stack_profile_builder_unittest.cc
@@ -0,0 +1,363 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/call_stack_profile_builder.h"
+
+#include "base/files/file_path.h"
+#include "base/sampling_heap_profiler/module_cache.h"
+#include "base/test/bind_test_util.h"
+#include "base/test/mock_callback.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "components/metrics/call_stack_profile_params.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/sampled_profile.pb.h"
+
+using Frame = base::StackSamplingProfiler::Frame;
+using Module = base::ModuleCache::Module;
+
+namespace metrics {
+
+namespace {
+
+constexpr CallStackProfileParams kProfileParams = {
+ CallStackProfileParams::BROWSER_PROCESS,
+ CallStackProfileParams::MAIN_THREAD,
+ CallStackProfileParams::PROCESS_STARTUP};
+
+class TestingCallStackProfileBuilder : public CallStackProfileBuilder {
+ public:
+ TestingCallStackProfileBuilder(
+ const CallStackProfileParams& profile_params,
+ base::OnceClosure completed_callback = base::OnceClosure());
+
+ ~TestingCallStackProfileBuilder() override;
+
+ const SampledProfile& sampled_profile() { return sampled_profile_; }
+
+ protected:
+ // Overridden for testing.
+ void PassProfilesToMetricsProvider(SampledProfile sampled_profile) override;
+
+ private:
+ // The completed profile.
+ SampledProfile sampled_profile_;
+};
+
+TestingCallStackProfileBuilder::TestingCallStackProfileBuilder(
+ const CallStackProfileParams& profile_params,
+ base::OnceClosure completed_callback)
+ : CallStackProfileBuilder(profile_params, std::move(completed_callback)) {}
+
+TestingCallStackProfileBuilder::~TestingCallStackProfileBuilder() = default;
+
+void TestingCallStackProfileBuilder::PassProfilesToMetricsProvider(
+ SampledProfile sampled_profile) {
+ sampled_profile_ = std::move(sampled_profile);
+}
+
+} // namespace
+
+TEST(CallStackProfileBuilderTest, SetProcessMilestone) {
+ auto profile_builder =
+ std::make_unique<TestingCallStackProfileBuilder>(kProfileParams);
+
+ // The default milestone is 0.
+ profile_builder->RecordAnnotations();
+ profile_builder->OnSampleCompleted(std::vector<Frame>());
+
+ CallStackProfileBuilder::SetProcessMilestone(1);
+ profile_builder->RecordAnnotations();
+ profile_builder->OnSampleCompleted(std::vector<Frame>());
+
+ profile_builder->OnProfileCompleted(base::TimeDelta(), base::TimeDelta());
+
+ const SampledProfile& proto = profile_builder->sampled_profile();
+
+ ASSERT_TRUE(proto.has_call_stack_profile());
+ const CallStackProfile& profile = proto.call_stack_profile();
+
+ ASSERT_EQ(2, profile.deprecated_sample_size());
+
+ uint32_t process_milestones = 0;
+ for (int i = 0; i < profile.deprecated_sample(0).process_phase().size(); ++i)
+ process_milestones |=
+ 1U << profile.deprecated_sample(0).process_phase().Get(i);
+ EXPECT_EQ(0U, process_milestones);
+
+ process_milestones = 0;
+ for (int i = 0; i < profile.deprecated_sample(1).process_phase().size(); ++i)
+ process_milestones |=
+ 1U << profile.deprecated_sample(1).process_phase().Get(i);
+ EXPECT_EQ(1U << 1, process_milestones);
+}
+
+TEST(CallStackProfileBuilderTest, ProfilingCompleted) {
+ // Set up a mock completed callback which will be run once.
+ base::MockCallback<base::OnceClosure> mock_closure;
+ EXPECT_CALL(mock_closure, Run()).Times(1);
+
+ auto profile_builder = std::make_unique<TestingCallStackProfileBuilder>(
+ kProfileParams, mock_closure.Get());
+
+#if defined(OS_WIN)
+ uint64_t module_md5 = 0x46C3E4166659AC02ULL;
+ base::FilePath module_path(L"c:\\some\\path\\to\\chrome.exe");
+#else
+ uint64_t module_md5 = 0x554838A8451AC36CULL;
+ base::FilePath module_path("/some/path/to/chrome");
+#endif
+
+ const uintptr_t module_base_address1 = 0x1000;
+ Module module1 = {module_base_address1, "1", module_path};
+ Frame frame1 = {module_base_address1 + 0x10, module1};
+
+ const uintptr_t module_base_address2 = 0x1100;
+ Module module2 = {module_base_address2, "2", module_path};
+ Frame frame2 = {module_base_address2 + 0x10, module2};
+
+ const uintptr_t module_base_address3 = 0x1010;
+ Module module3 = {module_base_address3, "3", module_path};
+ Frame frame3 = {module_base_address3 + 0x10, module3};
+
+ std::vector<Frame> frames1 = {frame1, frame2};
+ std::vector<Frame> frames2 = {frame3};
+
+ profile_builder->OnSampleCompleted(frames1);
+ profile_builder->OnSampleCompleted(frames2);
+ profile_builder->OnProfileCompleted(base::TimeDelta::FromMilliseconds(500),
+ base::TimeDelta::FromMilliseconds(100));
+
+ const SampledProfile& proto = profile_builder->sampled_profile();
+
+ ASSERT_TRUE(proto.has_process());
+ ASSERT_EQ(BROWSER_PROCESS, proto.process());
+ ASSERT_TRUE(proto.has_thread());
+ ASSERT_EQ(MAIN_THREAD, proto.thread());
+ ASSERT_TRUE(proto.has_trigger_event());
+ ASSERT_EQ(SampledProfile::PROCESS_STARTUP, proto.trigger_event());
+
+ ASSERT_TRUE(proto.has_call_stack_profile());
+ const CallStackProfile& profile = proto.call_stack_profile();
+
+ ASSERT_EQ(2, profile.deprecated_sample_size());
+ ASSERT_EQ(2, profile.deprecated_sample(0).frame_size());
+ ASSERT_TRUE(profile.deprecated_sample(0).frame(0).has_module_id_index());
+ EXPECT_EQ(0, profile.deprecated_sample(0).frame(0).module_id_index());
+ ASSERT_TRUE(profile.deprecated_sample(0).frame(1).has_module_id_index());
+ EXPECT_EQ(1, profile.deprecated_sample(0).frame(1).module_id_index());
+ ASSERT_EQ(1, profile.deprecated_sample(1).frame_size());
+ ASSERT_TRUE(profile.deprecated_sample(1).frame(0).has_module_id_index());
+ EXPECT_EQ(2, profile.deprecated_sample(1).frame(0).module_id_index());
+
+ ASSERT_EQ(3, profile.module_id().size());
+ ASSERT_TRUE(profile.module_id(0).has_build_id());
+ ASSERT_EQ("1", profile.module_id(0).build_id());
+ ASSERT_TRUE(profile.module_id(0).has_name_md5_prefix());
+ ASSERT_EQ(module_md5, profile.module_id(0).name_md5_prefix());
+ ASSERT_TRUE(profile.module_id(1).has_build_id());
+ ASSERT_EQ("2", profile.module_id(1).build_id());
+ ASSERT_TRUE(profile.module_id(1).has_name_md5_prefix());
+ ASSERT_EQ(module_md5, profile.module_id(1).name_md5_prefix());
+ ASSERT_TRUE(profile.module_id(2).has_build_id());
+ ASSERT_EQ("3", profile.module_id(2).build_id());
+ ASSERT_TRUE(profile.module_id(2).has_name_md5_prefix());
+ ASSERT_EQ(module_md5, profile.module_id(2).name_md5_prefix());
+
+ ASSERT_TRUE(profile.has_profile_duration_ms());
+ EXPECT_EQ(500, profile.profile_duration_ms());
+ ASSERT_TRUE(profile.has_sampling_period_ms());
+ EXPECT_EQ(100, profile.sampling_period_ms());
+}
+
+TEST(CallStackProfileBuilderTest, SamplesDeduped) {
+ auto profile_builder =
+ std::make_unique<TestingCallStackProfileBuilder>(kProfileParams);
+
+#if defined(OS_WIN)
+ base::FilePath module_path(L"c:\\some\\path\\to\\chrome.exe");
+#else
+ base::FilePath module_path("/some/path/to/chrome");
+#endif
+
+ const uintptr_t module_base_address1 = 0x1000;
+ Module module1 = {module_base_address1, "1", module_path};
+ Frame frame1 = {module_base_address1 + 0x10, module1};
+
+ const uintptr_t module_base_address2 = 0x1100;
+ Module module2 = {module_base_address2, "2", module_path};
+ Frame frame2 = {module_base_address2 + 0x10, module2};
+
+ std::vector<Frame> frames = {frame1, frame2};
+
+  // Two samples are completed with the same frames. They also have the same
+  // process milestone and are therefore deduped into one.
+ CallStackProfileBuilder::SetProcessMilestone(0);
+
+ profile_builder->RecordAnnotations();
+ profile_builder->OnSampleCompleted(frames);
+
+ profile_builder->RecordAnnotations();
+ profile_builder->OnSampleCompleted(frames);
+
+ profile_builder->OnProfileCompleted(base::TimeDelta(), base::TimeDelta());
+
+ const SampledProfile& proto = profile_builder->sampled_profile();
+
+ ASSERT_TRUE(proto.has_process());
+ ASSERT_EQ(BROWSER_PROCESS, proto.process());
+ ASSERT_TRUE(proto.has_thread());
+ ASSERT_EQ(MAIN_THREAD, proto.thread());
+ ASSERT_TRUE(proto.has_trigger_event());
+ ASSERT_EQ(SampledProfile::PROCESS_STARTUP, proto.trigger_event());
+
+ ASSERT_TRUE(proto.has_call_stack_profile());
+ ASSERT_EQ(1, proto.call_stack_profile().deprecated_sample_size());
+}
+
+TEST(CallStackProfileBuilderTest, SamplesNotDeduped) {
+ auto profile_builder =
+ std::make_unique<TestingCallStackProfileBuilder>(kProfileParams);
+
+#if defined(OS_WIN)
+ base::FilePath module_path(L"c:\\some\\path\\to\\chrome.exe");
+#else
+ base::FilePath module_path("/some/path/to/chrome");
+#endif
+
+ const uintptr_t module_base_address1 = 0x1000;
+ Module module1 = {module_base_address1, "1", module_path};
+ Frame frame1 = {module_base_address1 + 0x10, module1};
+
+ const uintptr_t module_base_address2 = 0x1100;
+ Module module2 = {module_base_address2, "2", module_path};
+ Frame frame2 = {module_base_address2 + 0x10, module2};
+
+ std::vector<Frame> frames = {frame1, frame2};
+
+  // Two samples are completed with the same frames but different process
+  // milestones. They are considered different samples and therefore are not
+  // deduped.
+ CallStackProfileBuilder::SetProcessMilestone(2);
+ profile_builder->RecordAnnotations();
+ profile_builder->OnSampleCompleted(frames);
+
+ CallStackProfileBuilder::SetProcessMilestone(4);
+ profile_builder->RecordAnnotations();
+ profile_builder->OnSampleCompleted(frames);
+
+ profile_builder->OnProfileCompleted(base::TimeDelta(), base::TimeDelta());
+
+ const SampledProfile& proto = profile_builder->sampled_profile();
+
+ ASSERT_TRUE(proto.has_process());
+ ASSERT_EQ(BROWSER_PROCESS, proto.process());
+ ASSERT_TRUE(proto.has_thread());
+ ASSERT_EQ(MAIN_THREAD, proto.thread());
+ ASSERT_TRUE(proto.has_trigger_event());
+ ASSERT_EQ(SampledProfile::PROCESS_STARTUP, proto.trigger_event());
+
+ ASSERT_TRUE(proto.has_call_stack_profile());
+ ASSERT_EQ(2, proto.call_stack_profile().deprecated_sample_size());
+}
+
+TEST(CallStackProfileBuilderTest, Modules) {
+ auto profile_builder =
+ std::make_unique<TestingCallStackProfileBuilder>(kProfileParams);
+
+ const uintptr_t module_base_address1 = 0x1000;
+ Module module1; // module1 has no information hence invalid.
+ Frame frame1 = {module_base_address1 + 0x10, module1};
+
+ const uintptr_t module_base_address2 = 0x1100;
+#if defined(OS_WIN)
+ uint64_t module_md5 = 0x46C3E4166659AC02ULL;
+ base::FilePath module_path(L"c:\\some\\path\\to\\chrome.exe");
+#else
+ uint64_t module_md5 = 0x554838A8451AC36CULL;
+ base::FilePath module_path("/some/path/to/chrome");
+#endif
+ Module module2 = {module_base_address2, "2", module_path};
+ Frame frame2 = {module_base_address2 + 0x10, module2};
+
+ std::vector<Frame> frames = {frame1, frame2};
+
+ profile_builder->OnSampleCompleted(frames);
+ profile_builder->OnProfileCompleted(base::TimeDelta(), base::TimeDelta());
+
+ const SampledProfile& proto = profile_builder->sampled_profile();
+
+ ASSERT_TRUE(proto.has_call_stack_profile());
+ const CallStackProfile& profile = proto.call_stack_profile();
+
+ ASSERT_EQ(1, profile.deprecated_sample_size());
+ ASSERT_EQ(2, profile.deprecated_sample(0).frame_size());
+
+ ASSERT_FALSE(profile.deprecated_sample(0).frame(0).has_module_id_index());
+ ASSERT_FALSE(profile.deprecated_sample(0).frame(0).has_address());
+
+ ASSERT_TRUE(profile.deprecated_sample(0).frame(1).has_module_id_index());
+ EXPECT_EQ(0, profile.deprecated_sample(0).frame(1).module_id_index());
+ ASSERT_TRUE(profile.deprecated_sample(0).frame(1).has_address());
+ EXPECT_EQ(0x10ULL, profile.deprecated_sample(0).frame(1).address());
+
+ ASSERT_EQ(1, profile.module_id().size());
+ ASSERT_TRUE(profile.module_id(0).has_build_id());
+ ASSERT_EQ("2", profile.module_id(0).build_id());
+ ASSERT_TRUE(profile.module_id(0).has_name_md5_prefix());
+ ASSERT_EQ(module_md5, profile.module_id(0).name_md5_prefix());
+}
+
+TEST(CallStackProfileBuilderTest, DedupModules) {
+ auto profile_builder =
+ std::make_unique<TestingCallStackProfileBuilder>(kProfileParams);
+
+ const uintptr_t module_base_address = 0x1000;
+
+#if defined(OS_WIN)
+ uint64_t module_md5 = 0x46C3E4166659AC02ULL;
+ base::FilePath module_path(L"c:\\some\\path\\to\\chrome.exe");
+#else
+ uint64_t module_md5 = 0x554838A8451AC36CULL;
+ base::FilePath module_path("/some/path/to/chrome");
+#endif
+
+ Module module1 = {module_base_address, "1", module_path};
+ Frame frame1 = {module_base_address + 0x10, module1};
+
+ Module module2 = {module_base_address, "1", module_path};
+ Frame frame2 = {module_base_address + 0x20, module2};
+
+ std::vector<Frame> frames = {frame1, frame2};
+
+ profile_builder->OnSampleCompleted(frames);
+ profile_builder->OnProfileCompleted(base::TimeDelta(), base::TimeDelta());
+
+ const SampledProfile& proto = profile_builder->sampled_profile();
+
+ ASSERT_TRUE(proto.has_call_stack_profile());
+ const CallStackProfile& profile = proto.call_stack_profile();
+
+ ASSERT_EQ(1, profile.deprecated_sample_size());
+ ASSERT_EQ(2, profile.deprecated_sample(0).frame_size());
+
+ // Since module1 and module2 have the same base address, they are considered
+ // the same module and therefore deduped.
+ ASSERT_TRUE(profile.deprecated_sample(0).frame(0).has_module_id_index());
+ EXPECT_EQ(0, profile.deprecated_sample(0).frame(0).module_id_index());
+ ASSERT_TRUE(profile.deprecated_sample(0).frame(0).has_address());
+ EXPECT_EQ(0x10ULL, profile.deprecated_sample(0).frame(0).address());
+
+ ASSERT_TRUE(profile.deprecated_sample(0).frame(1).has_module_id_index());
+ EXPECT_EQ(0, profile.deprecated_sample(0).frame(1).module_id_index());
+ ASSERT_TRUE(profile.deprecated_sample(0).frame(1).has_address());
+ EXPECT_EQ(0x20ULL, profile.deprecated_sample(0).frame(1).address());
+
+ ASSERT_EQ(1, profile.module_id().size());
+ ASSERT_TRUE(profile.module_id(0).has_build_id());
+ ASSERT_EQ("1", profile.module_id(0).build_id());
+ ASSERT_TRUE(profile.module_id(0).has_name_md5_prefix());
+ ASSERT_EQ(module_md5, profile.module_id(0).name_md5_prefix());
+}
+
+} // namespace metrics
diff --git a/components/metrics/call_stack_profile_collector.cc b/components/metrics/call_stack_profile_collector.cc
new file mode 100644
index 0000000..3863331
--- /dev/null
+++ b/components/metrics/call_stack_profile_collector.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/call_stack_profile_collector.h"
+
+#include <memory>
+#include <utility>
+
+#include "components/metrics/call_stack_profile_encoding.h"
+#include "components/metrics/call_stack_profile_metrics_provider.h"
+#include "mojo/public/cpp/bindings/strong_binding.h"
+
+namespace metrics {
+
+CallStackProfileCollector::CallStackProfileCollector(
+ CallStackProfileParams::Process expected_process)
+ : expected_process_(expected_process) {}
+
+CallStackProfileCollector::~CallStackProfileCollector() {}
+
+// static
+void CallStackProfileCollector::Create(
+ CallStackProfileParams::Process expected_process,
+ mojom::CallStackProfileCollectorRequest request) {
+ mojo::MakeStrongBinding(
+ std::make_unique<CallStackProfileCollector>(expected_process),
+ std::move(request));
+}
+
+void CallStackProfileCollector::Collect(base::TimeTicks start_timestamp,
+ SampledProfile profile) {
+ if (profile.process() != ToExecutionContextProcess(expected_process_))
+ return;
+
+ CallStackProfileMetricsProvider::ReceiveCompletedProfile(start_timestamp,
+ std::move(profile));
+}
+
+} // namespace metrics
diff --git a/components/metrics/call_stack_profile_collector.h b/components/metrics/call_stack_profile_collector.h
new file mode 100644
index 0000000..0ffb94d
--- /dev/null
+++ b/components/metrics/call_stack_profile_collector.h
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CALL_STACK_PROFILE_COLLECTOR_H_
+#define COMPONENTS_METRICS_CALL_STACK_PROFILE_COLLECTOR_H_
+
+#include "base/macros.h"
+#include "components/metrics/call_stack_profile_params.h"
+#include "components/metrics/public/interfaces/call_stack_profile_collector.mojom.h"
+#include "third_party/metrics_proto/sampled_profile.pb.h"
+
+namespace metrics {
+
+class CallStackProfileCollector : public mojom::CallStackProfileCollector {
+ public:
+ explicit CallStackProfileCollector(
+ CallStackProfileParams::Process expected_process);
+ ~CallStackProfileCollector() override;
+
+ // Create a collector to receive profiles from |expected_process|.
+ static void Create(CallStackProfileParams::Process expected_process,
+ mojom::CallStackProfileCollectorRequest request);
+
+ // mojom::CallStackProfileCollector:
+ void Collect(base::TimeTicks start_timestamp,
+ SampledProfile profile) override;
+
+ private:
+ // Profile params are validated to come from this process. Profiles with a
+ // different process declared in the params are considered untrustworthy and
+ // ignored.
+ const CallStackProfileParams::Process expected_process_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallStackProfileCollector);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CALL_STACK_PROFILE_COLLECTOR_H_
diff --git a/components/metrics/call_stack_profile_encoding.cc b/components/metrics/call_stack_profile_encoding.cc
new file mode 100644
index 0000000..5d6a111
--- /dev/null
+++ b/components/metrics/call_stack_profile_encoding.cc
@@ -0,0 +1,67 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/call_stack_profile_encoding.h"
+
+namespace metrics {
+
+Process ToExecutionContextProcess(CallStackProfileParams::Process process) {
+ switch (process) {
+ case CallStackProfileParams::UNKNOWN_PROCESS:
+ return UNKNOWN_PROCESS;
+ case CallStackProfileParams::BROWSER_PROCESS:
+ return BROWSER_PROCESS;
+ case CallStackProfileParams::RENDERER_PROCESS:
+ return RENDERER_PROCESS;
+ case CallStackProfileParams::GPU_PROCESS:
+ return GPU_PROCESS;
+ case CallStackProfileParams::UTILITY_PROCESS:
+ return UTILITY_PROCESS;
+ case CallStackProfileParams::ZYGOTE_PROCESS:
+ return ZYGOTE_PROCESS;
+ case CallStackProfileParams::SANDBOX_HELPER_PROCESS:
+ return SANDBOX_HELPER_PROCESS;
+ case CallStackProfileParams::PPAPI_PLUGIN_PROCESS:
+ return PPAPI_PLUGIN_PROCESS;
+ case CallStackProfileParams::PPAPI_BROKER_PROCESS:
+ return PPAPI_BROKER_PROCESS;
+ }
+ NOTREACHED();
+ return UNKNOWN_PROCESS;
+}
+
+Thread ToExecutionContextThread(CallStackProfileParams::Thread thread) {
+ switch (thread) {
+ case CallStackProfileParams::UNKNOWN_THREAD:
+ return UNKNOWN_THREAD;
+ case CallStackProfileParams::MAIN_THREAD:
+ return MAIN_THREAD;
+ case CallStackProfileParams::IO_THREAD:
+ return IO_THREAD;
+ case CallStackProfileParams::COMPOSITOR_THREAD:
+ return COMPOSITOR_THREAD;
+ }
+ NOTREACHED();
+ return UNKNOWN_THREAD;
+}
+
+SampledProfile::TriggerEvent ToSampledProfileTriggerEvent(
+ CallStackProfileParams::Trigger trigger) {
+ switch (trigger) {
+ case CallStackProfileParams::UNKNOWN:
+ return SampledProfile::UNKNOWN_TRIGGER_EVENT;
+ case CallStackProfileParams::PROCESS_STARTUP:
+ return SampledProfile::PROCESS_STARTUP;
+ case CallStackProfileParams::JANKY_TASK:
+ return SampledProfile::JANKY_TASK;
+ case CallStackProfileParams::THREAD_HUNG:
+ return SampledProfile::THREAD_HUNG;
+ case CallStackProfileParams::PERIODIC_COLLECTION:
+ return SampledProfile::PERIODIC_COLLECTION;
+ }
+ NOTREACHED();
+ return SampledProfile::UNKNOWN_TRIGGER_EVENT;
+}
+
+} // namespace metrics
diff --git a/components/metrics/call_stack_profile_encoding.h b/components/metrics/call_stack_profile_encoding.h
new file mode 100644
index 0000000..c71ace7
--- /dev/null
+++ b/components/metrics/call_stack_profile_encoding.h
@@ -0,0 +1,28 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CALL_STACK_PROFILE_ENCODING_H_
+#define COMPONENTS_METRICS_CALL_STACK_PROFILE_ENCODING_H_
+
+#include "components/metrics/call_stack_profile_params.h"
+#include "third_party/metrics_proto/sampled_profile.pb.h"
+
+namespace metrics {
+
+// Translates CallStackProfileParams's process to the corresponding execution
+// context Process.
+Process ToExecutionContextProcess(CallStackProfileParams::Process process);
+
+// Translates CallStackProfileParams's thread to the corresponding
+// SampledProfile Thread.
+Thread ToExecutionContextThread(CallStackProfileParams::Thread thread);
+
+// Translates CallStackProfileParams's trigger to the corresponding
+// SampledProfile TriggerEvent.
+SampledProfile::TriggerEvent ToSampledProfileTriggerEvent(
+ CallStackProfileParams::Trigger trigger);
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CALL_STACK_PROFILE_ENCODING_H_
diff --git a/components/metrics/call_stack_profile_metrics_provider.cc b/components/metrics/call_stack_profile_metrics_provider.cc
new file mode 100644
index 0000000..63160d1
--- /dev/null
+++ b/components/metrics/call_stack_profile_metrics_provider.cc
@@ -0,0 +1,233 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/call_stack_profile_metrics_provider.h"
+
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+
+namespace metrics {
+
+namespace {
+
+// Cap the number of pending profiles to avoid excessive memory usage when
+// profile uploads are delayed (e.g. due to being offline). 1250 profiles
+// correspond to 80MB of storage. Capping at this threshold loses approximately
+// 0.5% of profiles on canary and dev.
+// TODO(chengx): Remove this threshold after moving to a more memory-efficient
+// profile representation.
+const size_t kMaxPendingProfiles = 1250;
+
+// ProfileState --------------------------------------------------------------
+
+// A set of profiles and the start time of the collection associated with them.
+struct ProfileState {
+ ProfileState(base::TimeTicks start_timestamp, SampledProfile profile);
+ ProfileState(ProfileState&&);
+ ProfileState& operator=(ProfileState&&);
+
+ // The time at which the profile collection was started.
+ base::TimeTicks start_timestamp;
+
+ // The call stack profile message collected by the profiler.
+ SampledProfile profile;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProfileState);
+};
+
+ProfileState::ProfileState(base::TimeTicks start_timestamp,
+ SampledProfile profile)
+ : start_timestamp(start_timestamp), profile(std::move(profile)) {}
+
+ProfileState::ProfileState(ProfileState&&) = default;
+
+// Some versions of GCC need this for push_back to work with std::move.
+ProfileState& ProfileState::operator=(ProfileState&&) = default;
+
+// PendingProfiles ------------------------------------------------------------
+
+// Singleton class responsible for retaining profiles received from
+// CallStackProfileBuilder. These are then sent to UMA on the invocation of
+// CallStackProfileMetricsProvider::ProvideCurrentSessionData(). We need to
+// store the profiles outside of a CallStackProfileMetricsProvider instance
+// since callers may start profiling before the CallStackProfileMetricsProvider
+// is created.
+//
+// Member functions on this class may be called on any thread.
+class PendingProfiles {
+ public:
+ static PendingProfiles* GetInstance();
+
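+  // Exchanges the pending profiles with the contents of |profiles|.
+  // ProvideCurrentSessionData() passes an empty vector here to take
+  // ownership of the pending profiles.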
+ void Swap(std::vector<ProfileState>* profiles);
+
+ // Enables the collection of profiles by CollectProfilesIfCollectionEnabled if
+ // |enabled| is true. Otherwise, clears current profiles and ignores profiles
+ // provided to future invocations of CollectProfilesIfCollectionEnabled.
+ void SetCollectionEnabled(bool enabled);
+
+ // True if profiles are being collected.
+ bool IsCollectionEnabled() const;
+
+ // Adds |profile| to the list of profiles if collection is enabled; it is
+ // not const& because it must be passed with std::move.
+ void CollectProfilesIfCollectionEnabled(ProfileState profile);
+
+ // Allows testing against the initial state multiple times.
+ void ResetToDefaultStateForTesting();
+
+ private:
+ friend struct base::DefaultSingletonTraits<PendingProfiles>;
+
+ PendingProfiles();
+ ~PendingProfiles() = default;
+
+ mutable base::Lock lock_;
+
+ // If true, profiles provided to CollectProfilesIfCollectionEnabled should be
+ // collected. Otherwise they will be ignored.
+ bool collection_enabled_;
+
+ // The last time collection was disabled. Used to determine if collection was
+ // disabled at any point since a profile was started.
+ base::TimeTicks last_collection_disable_time_;
+
+ // The last time collection was enabled. Used to determine if collection was
+ // enabled at any point since a profile was started.
+ base::TimeTicks last_collection_enable_time_;
+
+ // The set of completed profiles that should be reported.
+ std::vector<ProfileState> profiles_;
+
+ DISALLOW_COPY_AND_ASSIGN(PendingProfiles);
+};
+
+// static
+PendingProfiles* PendingProfiles::GetInstance() {
+ // Leaky for performance rather than correctness reasons.
+ return base::Singleton<PendingProfiles,
+ base::LeakySingletonTraits<PendingProfiles>>::get();
+}
+
+void PendingProfiles::Swap(std::vector<ProfileState>* profiles) {
+ base::AutoLock scoped_lock(lock_);
+ profiles_.swap(*profiles);
+}
+
+void PendingProfiles::SetCollectionEnabled(bool enabled) {
+ base::AutoLock scoped_lock(lock_);
+
+ collection_enabled_ = enabled;
+
+ if (!collection_enabled_) {
+ profiles_.clear();
+ last_collection_disable_time_ = base::TimeTicks::Now();
+ } else {
+ last_collection_enable_time_ = base::TimeTicks::Now();
+ }
+}
+
+bool PendingProfiles::IsCollectionEnabled() const {
+ base::AutoLock scoped_lock(lock_);
+ return collection_enabled_;
+}
+
+void PendingProfiles::CollectProfilesIfCollectionEnabled(ProfileState profile) {
+ base::AutoLock scoped_lock(lock_);
+
+ // Scenario 1: stop collection if it is disabled.
+ if (!collection_enabled_)
+ return;
+
+ // Scenario 2: stop collection if it is disabled after the start of collection
+ // for this profile.
+ if (!last_collection_disable_time_.is_null() &&
+ last_collection_disable_time_ >= profile.start_timestamp) {
+ return;
+ }
+
+ // Scenario 3: stop collection if it is disabled before the start of
+ // collection and re-enabled after the start. Note that this is different from
+ // scenario 1 where re-enabling never happens.
+ if (!last_collection_disable_time_.is_null() &&
+ !last_collection_enable_time_.is_null() &&
+ last_collection_enable_time_ >= profile.start_timestamp) {
+ return;
+ }
+
+ if (profiles_.size() < kMaxPendingProfiles)
+ profiles_.push_back(std::move(profile));
+}
+
+void PendingProfiles::ResetToDefaultStateForTesting() {
+ base::AutoLock scoped_lock(lock_);
+
+ collection_enabled_ = true;
+ last_collection_disable_time_ = base::TimeTicks();
+ last_collection_enable_time_ = base::TimeTicks();
+ profiles_.clear();
+}
+
+// |collection_enabled_| is initialized to true to collect any profiles that are
+// generated prior to creation of the CallStackProfileMetricsProvider. The
+// ultimate disposition of these pre-creation collected profiles will be
+// determined by the initial recording state provided to
+// CallStackProfileMetricsProvider.
+PendingProfiles::PendingProfiles() : collection_enabled_(true) {}
+
+} // namespace
+
+// CallStackProfileMetricsProvider --------------------------------------------
+
+const base::Feature CallStackProfileMetricsProvider::kEnableReporting = {
+ "SamplingProfilerReporting", base::FEATURE_DISABLED_BY_DEFAULT};
+
+CallStackProfileMetricsProvider::CallStackProfileMetricsProvider() {}
+
+CallStackProfileMetricsProvider::~CallStackProfileMetricsProvider() {}
+
+// static
+void CallStackProfileMetricsProvider::ReceiveCompletedProfile(
+ base::TimeTicks profile_start_time,
+ SampledProfile profile) {
+ PendingProfiles::GetInstance()->CollectProfilesIfCollectionEnabled(
+ ProfileState(profile_start_time, std::move(profile)));
+}
+
+void CallStackProfileMetricsProvider::OnRecordingEnabled() {
+ PendingProfiles::GetInstance()->SetCollectionEnabled(
+ base::FeatureList::IsEnabled(kEnableReporting));
+}
+
+void CallStackProfileMetricsProvider::OnRecordingDisabled() {
+ PendingProfiles::GetInstance()->SetCollectionEnabled(false);
+}
+
+void CallStackProfileMetricsProvider::ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) {
+ std::vector<ProfileState> pending_profiles;
+ PendingProfiles::GetInstance()->Swap(&pending_profiles);
+
+ DCHECK(base::FeatureList::IsEnabled(kEnableReporting) ||
+ pending_profiles.empty());
+
+ for (const auto& profile_state : pending_profiles) {
+ SampledProfile* sampled_profile = uma_proto->add_sampled_profile();
+ *sampled_profile = std::move(profile_state.profile);
+ }
+}
+
+// static
+void CallStackProfileMetricsProvider::ResetStaticStateForTesting() {
+ PendingProfiles::GetInstance()->ResetToDefaultStateForTesting();
+}
+
+} // namespace metrics
diff --git a/components/metrics/call_stack_profile_metrics_provider.h b/components/metrics/call_stack_profile_metrics_provider.h
new file mode 100644
index 0000000..e977040
--- /dev/null
+++ b/components/metrics/call_stack_profile_metrics_provider.h
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CALL_STACK_PROFILE_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_CALL_STACK_PROFILE_METRICS_PROVIDER_H_
+
+#include "base/feature_list.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_provider.h"
+#include "third_party/metrics_proto/sampled_profile.pb.h"
+
+namespace metrics {
+
+class ChromeUserMetricsExtension;
+
+// Performs metrics logging for the stack sampling profiler.
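+//
+// Profiles are received via ReceiveCompletedProfile() (for child processes,
+// forwarded by CallStackProfileCollector) and are appended to the UMA log in
+// ProvideCurrentSessionData(), provided recording is enabled and the
+// kEnableReporting feature is on.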
+class CallStackProfileMetricsProvider : public MetricsProvider {
+ public:
+ CallStackProfileMetricsProvider();
+ ~CallStackProfileMetricsProvider() override;
+
+ // Will be invoked on either the main thread or the profiler's thread.
+ // Provides the profile to PendingProfiles to append, if the collecting state
+ // allows. |profile| is not const& because it must be passed with std::move.
+ static void ReceiveCompletedProfile(base::TimeTicks profile_start_time,
+ SampledProfile profile);
+
+ // MetricsProvider:
+ void OnRecordingEnabled() override;
+ void OnRecordingDisabled() override;
+ void ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) override;
+
+ protected:
+ // base::Feature for reporting profiles. Provided here for test use.
+ static const base::Feature kEnableReporting;
+
+ // Reset the static state to the defaults after startup.
+ static void ResetStaticStateForTesting();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CallStackProfileMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CALL_STACK_PROFILE_METRICS_PROVIDER_H_
diff --git a/components/metrics/call_stack_profile_metrics_provider_unittest.cc b/components/metrics/call_stack_profile_metrics_provider_unittest.cc
new file mode 100644
index 0000000..b326252
--- /dev/null
+++ b/components/metrics/call_stack_profile_metrics_provider_unittest.cc
@@ -0,0 +1,122 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/call_stack_profile_metrics_provider.h"
+
+#include <utility>
+
+#include "base/macros.h"
+#include "base/test/scoped_feature_list.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+
+namespace metrics {
+
+// This test fixture enables the feature that
+// CallStackProfileMetricsProvider depends on to report a profile.
+class CallStackProfileMetricsProviderTest : public testing::Test {
+ public:
+ CallStackProfileMetricsProviderTest() {
+ scoped_feature_list_.InitAndEnableFeature(TestState::kEnableReporting);
+ TestState::ResetStaticStateForTesting();
+ }
+
+ ~CallStackProfileMetricsProviderTest() override {}
+
+ private:
+ // Exposes the feature from the CallStackProfileMetricsProvider.
+ class TestState : public CallStackProfileMetricsProvider {
+ public:
+ using CallStackProfileMetricsProvider::kEnableReporting;
+ using CallStackProfileMetricsProvider::ResetStaticStateForTesting;
+ };
+
+ base::test::ScopedFeatureList scoped_feature_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallStackProfileMetricsProviderTest);
+};
+
+// Checks that the pending profile is passed to ProvideCurrentSessionData.
+TEST_F(CallStackProfileMetricsProviderTest, ProvideCurrentSessionData) {
+ CallStackProfileMetricsProvider provider;
+ provider.OnRecordingEnabled();
+ CallStackProfileMetricsProvider::ReceiveCompletedProfile(
+ base::TimeTicks::Now(), SampledProfile());
+ ChromeUserMetricsExtension uma_proto;
+ provider.ProvideCurrentSessionData(&uma_proto);
+ ASSERT_EQ(1, uma_proto.sampled_profile().size());
+}
+
+// Checks that the pending profile is provided to ProvideCurrentSessionData
+// when collected before CallStackProfileMetricsProvider is instantiated.
+TEST_F(CallStackProfileMetricsProviderTest,
+ ProfileProvidedWhenCollectedBeforeInstantiation) {
+ CallStackProfileMetricsProvider::ReceiveCompletedProfile(
+ base::TimeTicks::Now(), SampledProfile());
+ CallStackProfileMetricsProvider provider;
+ provider.OnRecordingEnabled();
+ ChromeUserMetricsExtension uma_proto;
+ provider.ProvideCurrentSessionData(&uma_proto);
+ EXPECT_EQ(1, uma_proto.sampled_profile_size());
+}
+
+// Checks that the pending profile is not provided to ProvideCurrentSessionData
+// while recording is disabled.
+TEST_F(CallStackProfileMetricsProviderTest, ProfileNotProvidedWhileDisabled) {
+ CallStackProfileMetricsProvider provider;
+ provider.OnRecordingDisabled();
+ CallStackProfileMetricsProvider::ReceiveCompletedProfile(
+ base::TimeTicks::Now(), SampledProfile());
+ ChromeUserMetricsExtension uma_proto;
+ provider.ProvideCurrentSessionData(&uma_proto);
+ EXPECT_EQ(0, uma_proto.sampled_profile_size());
+}
+
+// Checks that the pending profile is not provided to ProvideCurrentSessionData
+// if recording is disabled while profiling.
+TEST_F(CallStackProfileMetricsProviderTest,
+ ProfileNotProvidedAfterChangeToDisabled) {
+ CallStackProfileMetricsProvider provider;
+ provider.OnRecordingEnabled();
+ base::TimeTicks profile_start_time = base::TimeTicks::Now();
+ provider.OnRecordingDisabled();
+ CallStackProfileMetricsProvider::ReceiveCompletedProfile(profile_start_time,
+ SampledProfile());
+ ChromeUserMetricsExtension uma_proto;
+ provider.ProvideCurrentSessionData(&uma_proto);
+ EXPECT_EQ(0, uma_proto.sampled_profile_size());
+}
+
+// Checks that the pending profile is not provided to ProvideCurrentSessionData
+// if recording is enabled, but then disabled and reenabled while profiling.
+TEST_F(CallStackProfileMetricsProviderTest,
+ ProfileNotProvidedAfterChangeToDisabledThenEnabled) {
+ CallStackProfileMetricsProvider provider;
+ provider.OnRecordingEnabled();
+ base::TimeTicks profile_start_time = base::TimeTicks::Now();
+ provider.OnRecordingDisabled();
+ provider.OnRecordingEnabled();
+ CallStackProfileMetricsProvider::ReceiveCompletedProfile(profile_start_time,
+ SampledProfile());
+ ChromeUserMetricsExtension uma_proto;
+ provider.ProvideCurrentSessionData(&uma_proto);
+ EXPECT_EQ(0, uma_proto.sampled_profile_size());
+}
+
+// Checks that the pending profile is not provided to ProvideCurrentSessionData
+// if recording is disabled, but then enabled while profiling.
+TEST_F(CallStackProfileMetricsProviderTest,
+ ProfileNotProvidedAfterChangeFromDisabled) {
+ CallStackProfileMetricsProvider provider;
+ provider.OnRecordingDisabled();
+ base::TimeTicks profile_start_time = base::TimeTicks::Now();
+ provider.OnRecordingEnabled();
+ CallStackProfileMetricsProvider::ReceiveCompletedProfile(profile_start_time,
+ SampledProfile());
+ ChromeUserMetricsExtension uma_proto;
+ provider.ProvideCurrentSessionData(&uma_proto);
+ EXPECT_EQ(0, uma_proto.sampled_profile_size());
+}
+
+} // namespace metrics
diff --git a/components/metrics/call_stack_profile_params.h b/components/metrics/call_stack_profile_params.h
new file mode 100644
index 0000000..5e78ef1
--- /dev/null
+++ b/components/metrics/call_stack_profile_params.h
@@ -0,0 +1,71 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CALL_STACK_PROFILE_PARAMS_H_
+#define COMPONENTS_METRICS_CALL_STACK_PROFILE_PARAMS_H_
+
+#include "base/time/time.h"
+
+namespace metrics {
+
+// Parameters to pass back to the metrics provider.
+struct CallStackProfileParams {
+ // The process in which the collection occurred.
+ enum Process {
+ UNKNOWN_PROCESS,
+ BROWSER_PROCESS,
+ RENDERER_PROCESS,
+ GPU_PROCESS,
+ UTILITY_PROCESS,
+ ZYGOTE_PROCESS,
+ SANDBOX_HELPER_PROCESS,
+ PPAPI_PLUGIN_PROCESS,
+ PPAPI_BROKER_PROCESS
+ };
+
+ // The thread from which the collection occurred.
+ enum Thread {
+ UNKNOWN_THREAD,
+
+ // Each process has a 'main thread'. In the Browser process, the 'main
+ // thread' is also often called the 'UI thread'.
+ MAIN_THREAD,
+ IO_THREAD,
+
+ // Compositor thread (can be in both renderer and gpu processes).
+ COMPOSITOR_THREAD,
+ };
+
+ // The event that triggered the profile collection.
+ enum Trigger {
+ UNKNOWN,
+ PROCESS_STARTUP,
+ JANKY_TASK,
+ THREAD_HUNG,
+ PERIODIC_COLLECTION,
+ TRIGGER_LAST = PERIODIC_COLLECTION
+ };
+
+ // The default constructor is required for mojo and should not be used
+ // otherwise. A valid trigger should always be specified.
+ constexpr CallStackProfileParams()
+ : CallStackProfileParams(UNKNOWN_PROCESS, UNKNOWN_THREAD, UNKNOWN) {}
+ constexpr CallStackProfileParams(Process process,
+ Thread thread,
+ Trigger trigger)
+ : process(process), thread(thread), trigger(trigger) {}
+
+ // The collection process.
+ Process process;
+
+ // The collection thread.
+ Thread thread;
+
+ // The triggering event.
+ Trigger trigger;
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CALL_STACK_PROFILE_PARAMS_H_
diff --git a/components/metrics/child_call_stack_profile_collector.cc b/components/metrics/child_call_stack_profile_collector.cc
new file mode 100644
index 0000000..0ae6d47
--- /dev/null
+++ b/components/metrics/child_call_stack_profile_collector.cc
@@ -0,0 +1,81 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/child_call_stack_profile_collector.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+
+namespace metrics {
+
+ChildCallStackProfileCollector::ProfileState::ProfileState() = default;
+ChildCallStackProfileCollector::ProfileState::ProfileState(ProfileState&&) =
+ default;
+
+ChildCallStackProfileCollector::ProfileState::ProfileState(
+ base::TimeTicks start_timestamp,
+ SampledProfile profile)
+ : start_timestamp(start_timestamp), profile(std::move(profile)) {}
+
+ChildCallStackProfileCollector::ProfileState::~ProfileState() = default;
+
+// Some versions of GCC need this for push_back to work with std::move.
+ChildCallStackProfileCollector::ProfileState&
+ChildCallStackProfileCollector::ProfileState::operator=(ProfileState&&) =
+ default;
+
+ChildCallStackProfileCollector::ChildCallStackProfileCollector() {}
+
+ChildCallStackProfileCollector::~ChildCallStackProfileCollector() {}
+
+void ChildCallStackProfileCollector::SetParentProfileCollector(
+ metrics::mojom::CallStackProfileCollectorPtr parent_collector) {
+ base::AutoLock alock(lock_);
+  // This function should only be invoked once, during the mode of operation
+  // when profiles are being retained after construction.
+ DCHECK(retain_profiles_);
+ retain_profiles_ = false;
+ task_runner_ = base::ThreadTaskRunnerHandle::Get();
+ // This should only be set one time per child process.
+ DCHECK(!parent_collector_);
+ parent_collector_ = std::move(parent_collector);
+ if (parent_collector_) {
+ for (ProfileState& state : profiles_) {
+ parent_collector_->Collect(state.start_timestamp,
+ std::move(state.profile));
+ }
+ }
+ profiles_.clear();
+}
+
+void ChildCallStackProfileCollector::Collect(base::TimeTicks start_timestamp,
+ SampledProfile profile) {
+ base::AutoLock alock(lock_);
+ if (task_runner_ &&
+ // The profiler thread does not have a task runner. Attempting to
+ // invoke Get() on it results in a DCHECK.
+ (!base::ThreadTaskRunnerHandle::IsSet() ||
+ base::ThreadTaskRunnerHandle::Get() != task_runner_)) {
+      // Post back to the thread that owns the parent interface.
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&ChildCallStackProfileCollector::Collect,
+ // This class has lazy instance lifetime.
+ base::Unretained(this), start_timestamp,
+ std::move(profile)));
+ return;
+ }
+
+ if (parent_collector_) {
+ parent_collector_->Collect(start_timestamp, std::move(profile));
+ } else if (retain_profiles_) {
+ profiles_.push_back(ProfileState(start_timestamp, std::move(profile)));
+ }
+}
+
+} // namespace metrics
diff --git a/components/metrics/child_call_stack_profile_collector.h b/components/metrics/child_call_stack_profile_collector.h
new file mode 100644
index 0000000..27815d8
--- /dev/null
+++ b/components/metrics/child_call_stack_profile_collector.h
@@ -0,0 +1,115 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CHILD_CALL_STACK_PROFILE_COLLECTOR_H_
+#define COMPONENTS_METRICS_CHILD_CALL_STACK_PROFILE_COLLECTOR_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "components/metrics/public/interfaces/call_stack_profile_collector.mojom.h"
+#include "third_party/metrics_proto/sampled_profile.pb.h"
+
+namespace service_manager {
+class InterfaceProvider;
+}
+
+namespace metrics {
+
+// ChildCallStackProfileCollector collects stacks at startup, caching them
+// internally until a CallStackProfileCollector interface is available. If a
+// CallStackProfileCollector is provided via the InterfaceProvider supplied to
+// SetParentProfileCollector, the cached stacks are sent via that interface. All
+// future stacks received via callbacks supplied by GetProfilerCallback are sent
+// via that interface as well.
+//
+// If no CallStackProfileCollector is provided via InterfaceProvider, any cached
+// stacks and all future stacks received via callbacks supplied by
+// GetProfilerCallback are flushed. In typical usage this should not happen
+// because the browser is expected to always supply a CallStackProfileCollector.
+//
+// This class is only necessary if a CallStackProfileCollector is not available
+// at the time the profiler is created. Otherwise the CallStackProfileCollector
+// can be used directly.
+//
+// To use, create as a leaky lazy instance:
+//
+// base::LazyInstance<metrics::ChildCallStackProfileCollector>::Leaky
+// g_call_stack_profile_collector = LAZY_INSTANCE_INITIALIZER;
+//
+// Then, invoke Collect() in CallStackProfileBuilder::OnProfileCompleted() to
+// collect a profile.
+//
+// When the mojo InterfaceProvider becomes available, provide it via
+// SetParentProfileCollector().
+class ChildCallStackProfileCollector {
+ public:
+ ChildCallStackProfileCollector();
+ ~ChildCallStackProfileCollector();
+
+ // Sets the CallStackProfileCollector interface from |parent_collector|. This
+ // function MUST be invoked exactly once, regardless of whether
+ // |parent_collector| is null, as it flushes pending data in either case.
+ void SetParentProfileCollector(
+ metrics::mojom::CallStackProfileCollectorPtr parent_collector);
+
+ // Collects |profile| whose collection start time is |start_timestamp|.
+ void Collect(base::TimeTicks start_timestamp, SampledProfile profile);
+
+ private:
+ friend class ChildCallStackProfileCollectorTest;
+
+ // Bundles together a collected profile and the collection state for
+ // storage, pending availability of the parent mojo interface. |profile|
+ // is not const& because it must be passed with std::move.
+ struct ProfileState {
+ ProfileState();
+ ProfileState(ProfileState&&);
+ ProfileState(base::TimeTicks start_timestamp, SampledProfile profile);
+ ~ProfileState();
+
+ ProfileState& operator=(ProfileState&&);
+
+ base::TimeTicks start_timestamp;
+
+ // The sampled profile.
+ SampledProfile profile;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProfileState);
+ };
+
+ // This object may be accessed on any thread, including the profiler
+  // thread. The expected use case is for the object to be created, and for
+  // GetProfilerCallback to be invoked, before the message loop starts, which
+  // prevents the use of PostTask and the like for inter-thread communication.
+ base::Lock lock_;
+
+  // Whether to retain profiles when the interface is not set. Remains true
+ // until the invocation of SetParentProfileCollector(), at which point it is
+ // false for the rest of the object lifetime.
+ bool retain_profiles_ = true;
+
+ // The task runner associated with the parent interface.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ // The interface to use to collect the stack profiles provided to this
+ // object. Initially null until SetParentProfileCollector() is invoked, at
+ // which point it may either become set or remain null. If set, stacks are
+ // collected via the interface, otherwise they are ignored.
+ mojom::CallStackProfileCollectorPtr parent_collector_;
+
+ // Profiles being cached by this object, pending a parent interface to be
+ // supplied.
+ std::vector<ProfileState> profiles_;
+
+ DISALLOW_COPY_AND_ASSIGN(ChildCallStackProfileCollector);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CHILD_CALL_STACK_PROFILE_COLLECTOR_H_
diff --git a/components/metrics/child_call_stack_profile_collector_unittest.cc b/components/metrics/child_call_stack_profile_collector_unittest.cc
new file mode 100644
index 0000000..24cc30e
--- /dev/null
+++ b/components/metrics/child_call_stack_profile_collector_unittest.cc
@@ -0,0 +1,112 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/child_call_stack_profile_collector.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "mojo/public/cpp/bindings/binding.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+class ChildCallStackProfileCollectorTest : public testing::Test {
+ protected:
+ class Receiver : public mojom::CallStackProfileCollector {
+ public:
+ explicit Receiver(mojom::CallStackProfileCollectorRequest request)
+ : binding_(this, std::move(request)) {}
+ ~Receiver() override {}
+
+ void Collect(base::TimeTicks start_timestamp,
+ SampledProfile profile) override {
+ this->profiles.push_back(ChildCallStackProfileCollector::ProfileState(
+ start_timestamp, std::move(profile)));
+ }
+
+ std::vector<ChildCallStackProfileCollector::ProfileState> profiles;
+
+ private:
+ mojo::Binding<mojom::CallStackProfileCollector> binding_;
+
+ DISALLOW_COPY_AND_ASSIGN(Receiver);
+ };
+
+ ChildCallStackProfileCollectorTest()
+ : receiver_impl_(new Receiver(MakeRequest(&receiver_))) {}
+
+ void CollectEmptyProfile() {
+ child_collector_.Collect(base::TimeTicks::Now(), SampledProfile());
+ }
+
+ const std::vector<ChildCallStackProfileCollector::ProfileState>& profiles()
+ const {
+ return child_collector_.profiles_;
+ }
+
+ base::MessageLoop loop_;
+ mojom::CallStackProfileCollectorPtr receiver_;
+ std::unique_ptr<Receiver> receiver_impl_;
+ ChildCallStackProfileCollector child_collector_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ChildCallStackProfileCollectorTest);
+};
+
+// Test the behavior when an interface is provided.
+TEST_F(ChildCallStackProfileCollectorTest, InterfaceProvided) {
+ EXPECT_EQ(0u, profiles().size());
+
+ // Add a profile before providing the interface.
+ CollectEmptyProfile();
+ ASSERT_EQ(1u, profiles().size());
+ base::TimeTicks start_timestamp = profiles()[0].start_timestamp;
+ EXPECT_GE(base::TimeDelta::FromMilliseconds(10),
+ base::TimeTicks::Now() - start_timestamp);
+
+ // Set the interface. The profiles should be passed to it.
+ child_collector_.SetParentProfileCollector(std::move(receiver_));
+ base::RunLoop().RunUntilIdle();
+ EXPECT_EQ(0u, profiles().size());
+ ASSERT_EQ(1u, receiver_impl_->profiles.size());
+ EXPECT_EQ(start_timestamp, receiver_impl_->profiles[0].start_timestamp);
+
+ // Add a profile after providing the interface. It should also be passed.
+ receiver_impl_->profiles.clear();
+ CollectEmptyProfile();
+ base::RunLoop().RunUntilIdle();
+ EXPECT_EQ(0u, profiles().size());
+ ASSERT_EQ(1u, receiver_impl_->profiles.size());
+ EXPECT_GE(base::TimeDelta::FromMilliseconds(10),
+ (base::TimeTicks::Now() -
+ receiver_impl_->profiles[0].start_timestamp));
+}
+
+TEST_F(ChildCallStackProfileCollectorTest, InterfaceNotProvided) {
+ EXPECT_EQ(0u, profiles().size());
+
+ // Add a profile before providing a null interface.
+ CollectEmptyProfile();
+ ASSERT_EQ(1u, profiles().size());
+ EXPECT_GE(base::TimeDelta::FromMilliseconds(10),
+ base::TimeTicks::Now() - profiles()[0].start_timestamp);
+
+ // Set the null interface. The profile should be flushed.
+ child_collector_.SetParentProfileCollector(
+ mojom::CallStackProfileCollectorPtr());
+ base::RunLoop().RunUntilIdle();
+ EXPECT_EQ(0u, profiles().size());
+
+ // Add a profile after providing a null interface. They should also be
+ // flushed.
+ CollectEmptyProfile();
+ EXPECT_EQ(0u, profiles().size());
+}
+
+} // namespace metrics
diff --git a/components/metrics/clean_exit_beacon.cc b/components/metrics/clean_exit_beacon.cc
new file mode 100644
index 0000000..74aefd8
--- /dev/null
+++ b/components/metrics/clean_exit_beacon.cc
@@ -0,0 +1,98 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/clean_exit_beacon.h"
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/metrics/histogram_macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/registry.h"
+#endif
+
+namespace metrics {
+
+CleanExitBeacon::CleanExitBeacon(const base::string16& backup_registry_key,
+ PrefService* local_state)
+ : local_state_(local_state),
+ initial_value_(local_state->GetBoolean(prefs::kStabilityExitedCleanly)),
+ initial_browser_last_live_timestamp_(
+ local_state->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp)),
+ backup_registry_key_(backup_registry_key) {
+ DCHECK_NE(PrefService::INITIALIZATION_STATUS_WAITING,
+ local_state_->GetInitializationStatus());
+
+#if defined(OS_WIN)
+ // An enumeration of all possible permutations of the beacon state in the
+ // registry and in Local State.
+ enum {
+ DIRTY_DIRTY,
+ DIRTY_CLEAN,
+ CLEAN_DIRTY,
+ CLEAN_CLEAN,
+ MISSING_DIRTY,
+ MISSING_CLEAN,
+ NUM_CONSISTENCY_ENUMS
+ } consistency = DIRTY_DIRTY;
+
+ base::win::RegKey regkey;
+ DWORD value = 0u;
+ if (regkey.Open(HKEY_CURRENT_USER,
+ backup_registry_key_.c_str(),
+ KEY_ALL_ACCESS) == ERROR_SUCCESS &&
+ regkey.ReadValueDW(
+ base::ASCIIToUTF16(prefs::kStabilityExitedCleanly).c_str(), &value) ==
+ ERROR_SUCCESS) {
+ if (value)
+ consistency = initial_value_ ? CLEAN_CLEAN : CLEAN_DIRTY;
+ else
+ consistency = initial_value_ ? DIRTY_CLEAN : DIRTY_DIRTY;
+ } else {
+ consistency = initial_value_ ? MISSING_CLEAN : MISSING_DIRTY;
+ }
+
+ UMA_HISTOGRAM_ENUMERATION(
+ "UMA.CleanExitBeaconConsistency", consistency, NUM_CONSISTENCY_ENUMS);
+#endif
+}
+
+CleanExitBeacon::~CleanExitBeacon() {
+}
+
+// static
+void CleanExitBeacon::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterBooleanPref(prefs::kStabilityExitedCleanly, true);
+
+ registry->RegisterTimePref(prefs::kStabilityBrowserLastLiveTimeStamp,
+ base::Time(), PrefRegistry::LOSSY_PREF);
+}
+
+void CleanExitBeacon::WriteBeaconValue(bool value) {
+ UpdateLastLiveTimestamp();
+ local_state_->SetBoolean(prefs::kStabilityExitedCleanly, value);
+
+#if defined(OS_WIN)
+ base::win::RegKey regkey;
+ if (regkey.Create(HKEY_CURRENT_USER,
+ backup_registry_key_.c_str(),
+ KEY_ALL_ACCESS) == ERROR_SUCCESS) {
+ regkey.WriteValue(
+ base::ASCIIToUTF16(prefs::kStabilityExitedCleanly).c_str(),
+ value ? 1u : 0u);
+ }
+#endif
+}
+
+void CleanExitBeacon::UpdateLastLiveTimestamp() {
+ local_state_->SetTime(prefs::kStabilityBrowserLastLiveTimeStamp,
+ base::Time::Now());
+}
+
+} // namespace metrics
diff --git a/components/metrics/clean_exit_beacon.h b/components/metrics/clean_exit_beacon.h
new file mode 100644
index 0000000..0043596
--- /dev/null
+++ b/components/metrics/clean_exit_beacon.h
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CLEAN_EXIT_BEACON_H_
+#define COMPONENTS_METRICS_CLEAN_EXIT_BEACON_H_
+
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/time/time.h"
+
+class PrefRegistrySimple;
+class PrefService;
+
+namespace metrics {
+
+// Reads and updates a beacon used to detect whether the previous browser
+// process exited cleanly.
+class CleanExitBeacon {
+ public:
+ // Instantiates a CleanExitBeacon whose value is stored in |local_state|.
+ // |local_state| must be fully initialized.
+ // On Windows, |backup_registry_key| is used to store a backup of the beacon.
+ // It is ignored on other platforms.
+ CleanExitBeacon(
+ const base::string16& backup_registry_key,
+ PrefService* local_state);
+
+ ~CleanExitBeacon();
+
+ // Returns the original value of the beacon.
+ bool exited_cleanly() const { return initial_value_; }
+
+ // Returns the original value of the last live timestamp.
+ base::Time browser_last_live_timestamp() const {
+ return initial_browser_last_live_timestamp_;
+ }
+
+ // Writes the provided beacon value and updates the last live timestamp.
+ void WriteBeaconValue(bool exited_cleanly);
+
+ // Updates the last live timestamp.
+ void UpdateLastLiveTimestamp();
+
+ // Registers local state prefs used by this class.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ private:
+ PrefService* const local_state_;
+ const bool initial_value_;
+
+ // This is the value of the last live timestamp from local state at the
+ // time of construction. It notes a timestamp from the previous browser
+ // session when the browser was known to be alive.
+ const base::Time initial_browser_last_live_timestamp_;
+ const base::string16 backup_registry_key_;
+
+ DISALLOW_COPY_AND_ASSIGN(CleanExitBeacon);
+};
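+
+// Usage sketch (editor's illustration, not part of the original change;
+// |backup_registry_key| and |local_state| are assumed to be a registry key
+// string and an initialized PrefService*). One plausible pattern:
+//
+//   metrics::CleanExitBeacon beacon(backup_registry_key, local_state);
+//   if (!beacon.exited_cleanly()) {
+//     // The previous session did not shut down cleanly.
+//   }
+//   beacon.WriteBeaconValue(false);  // Mark the current session as running.
+//   // ... later, on a clean shutdown:
+//   beacon.WriteBeaconValue(true);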
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CLEAN_EXIT_BEACON_H_
diff --git a/components/metrics/client_info.cc b/components/metrics/client_info.cc
new file mode 100644
index 0000000..57ad394
--- /dev/null
+++ b/components/metrics/client_info.cc
@@ -0,0 +1,13 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/client_info.h"
+
+namespace metrics {
+
+ClientInfo::ClientInfo() : installation_date(0), reporting_enabled_date(0) {}
+
+ClientInfo::~ClientInfo() {}
+
+} // namespace metrics
diff --git a/components/metrics/client_info.h b/components/metrics/client_info.h
new file mode 100644
index 0000000..7dcf5d8
--- /dev/null
+++ b/components/metrics/client_info.h
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CLIENT_INFO_H_
+#define COMPONENTS_METRICS_CLIENT_INFO_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/macros.h"
+
+namespace metrics {
+
+// A data object used to pass data from outside the metrics component into the
+// metrics component.
+struct ClientInfo {
+ public:
+ ClientInfo();
+ ~ClientInfo();
+
+ // The metrics ID of this client: represented as a GUID string.
+ std::string client_id;
+
+ // The installation date: represented as an epoch time in seconds.
+ int64_t installation_date;
+
+ // The date on which metrics reporting was enabled: represented as an epoch
+ // time in seconds.
+ int64_t reporting_enabled_date;
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CLIENT_INFO_H_
diff --git a/components/metrics/cloned_install_detector.cc b/components/metrics/cloned_install_detector.cc
new file mode 100644
index 0000000..3839744
--- /dev/null
+++ b/components/metrics/cloned_install_detector.cc
@@ -0,0 +1,98 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/cloned_install_detector.h"
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task/post_task.h"
+#include "base/task_runner_util.h"
+#include "components/metrics/machine_id_provider.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+
+namespace metrics {
+
+namespace {
+
+uint32_t HashRawId(const std::string& value) {
+ uint64_t hash = base::HashMetricName(value);
+
+ // Only use 24 bits from the 64-bit hash.
+ return hash & ((1 << 24) - 1);
+}
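+
+// For illustration (editor's note, not in the original source): the mask
+// ((1 << 24) - 1) equals 0xFFFFFF, so only the low 24 bits of the hash
+// survive, e.g. 0x123456789ABCDEF0 & 0xFFFFFF == 0xBCDEF0.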
+
+// State of the generated machine id in relation to the previously stored value.
+// Note: UMA histogram enum - don't re-order or remove entries
+enum MachineIdState {
+ ID_GENERATION_FAILED,
+ ID_NO_STORED_VALUE,
+ ID_CHANGED,
+ ID_UNCHANGED,
+ ID_ENUM_SIZE
+};
+
+// Logs the state of generating a machine id and comparing it to a stored value.
+void LogMachineIdState(MachineIdState state) {
+ UMA_HISTOGRAM_ENUMERATION("UMA.MachineIdState", state, ID_ENUM_SIZE);
+}
+
+} // namespace
+
+ClonedInstallDetector::ClonedInstallDetector() : weak_ptr_factory_(this) {}
+
+ClonedInstallDetector::~ClonedInstallDetector() {
+}
+
+void ClonedInstallDetector::CheckForClonedInstall(PrefService* local_state) {
+ base::PostTaskWithTraitsAndReplyWithResult(
+ FROM_HERE,
+ {base::MayBlock(), base::TaskPriority::BEST_EFFORT,
+ base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ base::Bind(&MachineIdProvider::GetMachineId),
+ base::Bind(&ClonedInstallDetector::SaveMachineId,
+ weak_ptr_factory_.GetWeakPtr(), local_state));
+}
+
+void ClonedInstallDetector::SaveMachineId(PrefService* local_state,
+ const std::string& raw_id) {
+ if (raw_id.empty()) {
+ LogMachineIdState(ID_GENERATION_FAILED);
+ local_state->ClearPref(prefs::kMetricsMachineId);
+ return;
+ }
+
+ int hashed_id = HashRawId(raw_id);
+
+ MachineIdState id_state = ID_NO_STORED_VALUE;
+ if (local_state->HasPrefPath(prefs::kMetricsMachineId)) {
+ if (local_state->GetInteger(prefs::kMetricsMachineId) != hashed_id) {
+ id_state = ID_CHANGED;
+ // TODO(jwd): Use a callback to set the reset pref. That way
+ // ClonedInstallDetector doesn't need to know about this pref.
+ local_state->SetBoolean(prefs::kMetricsResetIds, true);
+ } else {
+ id_state = ID_UNCHANGED;
+ }
+ }
+
+ LogMachineIdState(id_state);
+
+ local_state->SetInteger(prefs::kMetricsMachineId, hashed_id);
+}
+
+// static
+void ClonedInstallDetector::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterIntegerPref(prefs::kMetricsMachineId, 0);
+}
+
+} // namespace metrics
diff --git a/components/metrics/cloned_install_detector.h b/components/metrics/cloned_install_detector.h
new file mode 100644
index 0000000..6a728ae
--- /dev/null
+++ b/components/metrics/cloned_install_detector.h
@@ -0,0 +1,50 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_CLONED_INSTALL_DETECTOR_H_
+#define COMPONENTS_METRICS_CLONED_INSTALL_DETECTOR_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+
+class PrefRegistrySimple;
+class PrefService;
+
+namespace metrics {
+
+// A class for detecting if an install is cloned. It does this by detecting
+// when the hardware running Chrome changes.
+class ClonedInstallDetector {
+ public:
+ ClonedInstallDetector();
+ virtual ~ClonedInstallDetector();
+
+ // Posts a background task to generate a machine ID and store it to a
+ // local state pref. If the newly generated ID is different than the
+ // previously stored one, then the install is considered cloned. The ID is a
+ // 24-bit value based off of machine characteristics. This value should never
+ // be sent over the network.
+ // TODO(jwd): Implement change detection.
+ void CheckForClonedInstall(PrefService* local_state);
+
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(ClonedInstallDetectorTest, SaveId);
+ FRIEND_TEST_ALL_PREFIXES(ClonedInstallDetectorTest, DetectClone);
+
+ // Converts raw_id into a 24-bit hash and stores the hash in |local_state|.
+ // |raw_id| is not a const ref because it's passed from a cross-thread post
+ // task.
+ void SaveMachineId(PrefService* local_state, const std::string& raw_id);
+
+ base::WeakPtrFactory<ClonedInstallDetector> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ClonedInstallDetector);
+};
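+
+// Usage sketch (editor's illustration, not part of the original change;
+// |registry| is a PrefRegistrySimple* and |local_state| a PrefService*):
+//
+//   metrics::ClonedInstallDetector::RegisterPrefs(registry);
+//   metrics::ClonedInstallDetector detector;
+//   // Posts a background task to hash the machine ID; if the hash differs
+//   // from the previously stored value, the reset pref is set.
+//   detector.CheckForClonedInstall(local_state);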
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_CLONED_INSTALL_DETECTOR_H_
diff --git a/components/metrics/cloned_install_detector_unittest.cc b/components/metrics/cloned_install_detector_unittest.cc
new file mode 100644
index 0000000..2e2ebd7
--- /dev/null
+++ b/components/metrics/cloned_install_detector_unittest.cc
@@ -0,0 +1,49 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/cloned_install_detector.h"
+
+#include "components/metrics/machine_id_provider.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_state_manager.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+namespace {
+
+const std::string kTestRawId = "test";
+// Hashed machine id for |kTestRawId|.
+const int kTestHashedId = 2216819;
+
+} // namespace
+
+// TODO(jwd): Change these tests to test the full flow and histogram outputs.
+// This should also remove the need to make the test a friend of
+// ClonedInstallDetector.
+TEST(ClonedInstallDetectorTest, SaveId) {
+ TestingPrefServiceSimple prefs;
+ ClonedInstallDetector::RegisterPrefs(prefs.registry());
+
+ ClonedInstallDetector detector;
+ detector.SaveMachineId(&prefs, kTestRawId);
+
+ EXPECT_EQ(kTestHashedId, prefs.GetInteger(prefs::kMetricsMachineId));
+}
+
+TEST(ClonedInstallDetectorTest, DetectClone) {
+ TestingPrefServiceSimple prefs;
+ MetricsStateManager::RegisterPrefs(prefs.registry());
+
+ // Save a machine id that will cause a clone to be detected.
+ prefs.SetInteger(prefs::kMetricsMachineId, kTestHashedId + 1);
+
+ ClonedInstallDetector detector;
+ detector.SaveMachineId(&prefs, kTestRawId);
+
+ EXPECT_TRUE(prefs.GetBoolean(prefs::kMetricsResetIds));
+}
+
+} // namespace metrics
diff --git a/components/metrics/component_metrics_provider.cc b/components/metrics/component_metrics_provider.cc
new file mode 100644
index 0000000..5ec24e2
--- /dev/null
+++ b/components/metrics/component_metrics_provider.cc
@@ -0,0 +1,109 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/component_metrics_provider.h"
+
+#include <map>
+#include <string>
+#include "base/strings/string_number_conversions.h"
+#include "components/component_updater/component_updater_service.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace metrics {
+
+namespace {
+
+SystemProfileProto_ComponentId CrxIdToComponentId(const std::string& app_id) {
+ const static std::map<std::string, SystemProfileProto_ComponentId>
+ component_map = {
+ {"khaoiebndkojlmppeemjhbpbandiljpe",
+ SystemProfileProto_ComponentId_FILE_TYPE_POLICIES},
+ {"kfoklmclfodeliojeaekpoflbkkhojea",
+ SystemProfileProto_ComponentId_ORIGIN_TRIALS},
+ {"llkgjffcdpffmhiakmfcdcblohccpfmo",
+ SystemProfileProto_ComponentId_ORIGIN_TRIALS}, // Alternate ID
+ {"mimojjlkmoijpicakmndhoigimigcmbb",
+ SystemProfileProto_ComponentId_PEPPER_FLASH},
+ {"ckjlcfmdbdglblbjglepgnoekdnkoklc",
+ SystemProfileProto_ComponentId_PEPPER_FLASH_CHROMEOS},
+ {"hnimpnehoodheedghdeeijklkeaacbdc",
+ SystemProfileProto_ComponentId_PNACL},
+ {"npdjjkjlcidkjlamlmmdelcjbcpdjocm",
+ SystemProfileProto_ComponentId_RECOVERY},
+ {"giekcmmlnklenlaomppkphknjmnnpneh",
+ SystemProfileProto_ComponentId_SSL_ERROR_ASSISTANT},
+ {"ojjgnpkioondelmggbekfhllhdaimnho",
+ SystemProfileProto_ComponentId_STH_SET},
+ {"hfnkpimlhhgieaddgfemjhofmfblmnib",
+ SystemProfileProto_ComponentId_CRL_SET},
+ {"gcmjkmgdlgnkkcocmoeiminaijmmjnii",
+ SystemProfileProto_ComponentId_SUBRESOURCE_FILTER},
+ {"gkmgaooipdjhmangpemjhigmamcehddo",
+ SystemProfileProto_ComponentId_SW_REPORTER},
+ {"oimompecagnajdejgnnjijobebaeigek",
+ SystemProfileProto_ComponentId_WIDEVINE_CDM},
+ {"bjbdkfoakgmkndalgpadobhgbhhoanho",
+ SystemProfileProto_ComponentId_EPSON_INKJET_PRINTER_ESCPR},
+ {"ojnjgapiepgciobpecnafnoeaegllfld",
+ SystemProfileProto_ComponentId_CROS_TERMINA},
+ {"gncenodapghbnkfkoognegdnjoeegmkp",
+ SystemProfileProto_ComponentId_STAR_CUPS_DRIVER},
+ {"gelhpeofhffbaeegmemklllhfdifagmb",
+ SystemProfileProto_ComponentId_SPEECH_SYNTHESIS_SV_SE},
+ {"lmelglejhemejginpboagddgdfbepgmp",
+ SystemProfileProto_ComponentId_OPTIMIZATION_HINTS},
+ {"fookoiellkocclipolgaceabajejjcnp",
+ SystemProfileProto_ComponentId_DOWNLOADABLE_STRINGS},
+ {"cjfkbpdpjpdldhclahpfgnlhpodlpnba",
+ SystemProfileProto_ComponentId_VR_ASSETS},
+ {"gjpajnddmedjmcklfflllocelehklffm",
+ SystemProfileProto_ComponentId_RTANALYTICS_LIGHT},
+ {"mjdmdobabdmfcbaakcaadileafkmifen",
+ SystemProfileProto_ComponentId_RTANALYTICS_FULL},
+ {"fhbeibbmaepakgdkkmjgldjajgpkkhfj",
+ SystemProfileProto_ComponentId_CELLULAR}};
+ const auto result = component_map.find(app_id);
+ if (result == component_map.end())
+ return SystemProfileProto_ComponentId_UNKNOWN;
+ return result->second;
+}
+
+// Extract the first 32 bits of a fingerprint string, excluding the fingerprint
+// format specifier - see the fingerprint format specification at
+// https://github.com/google/omaha/blob/master/doc/ServerProtocolV3.md
+uint32_t Trim(const std::string& fp) {
+ const auto len_prefix = fp.find(".");
+ if (len_prefix == std::string::npos)
+ return 0;
+ uint32_t result = 0;
+ if (base::HexStringToUInt(fp.substr(len_prefix + 1, 8), &result))
+ return result;
+ return 0;
+}
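+
+// For example (editor's note, consistent with the unit test for this file):
+// the fingerprint "1.0846414bf2025bbc..." has format specifier "1", so Trim()
+// parses the next eight hex digits "0846414b" and returns 0x0846414B
+// (138821963).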
+
+} // namespace
+
+ComponentMetricsProvider::ComponentMetricsProvider(
+ component_updater::ComponentUpdateService* component_update_service)
+ : component_update_service_(component_update_service) {}
+
+ComponentMetricsProvider::~ComponentMetricsProvider() = default;
+
+void ComponentMetricsProvider::ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile) {
+ for (const auto& component : component_update_service_->GetComponents()) {
+ const auto id = CrxIdToComponentId(component.id);
+ // Ignore any unknown components - in practice these are the
+ // SupervisedUserWhitelists, which we do not want to transmit to UMA or
+ // Crash.
+ if (id == SystemProfileProto_ComponentId_UNKNOWN)
+ continue;
+ auto* proto = system_profile->add_chrome_component();
+ proto->set_component_id(id);
+ proto->set_version(component.version.GetString());
+ proto->set_omaha_fingerprint(Trim(component.fingerprint));
+ }
+}
+
+} // namespace metrics
diff --git a/components/metrics/component_metrics_provider.h b/components/metrics/component_metrics_provider.h
new file mode 100644
index 0000000..7899b97
--- /dev/null
+++ b/components/metrics/component_metrics_provider.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_COMPONENT_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_COMPONENT_METRICS_PROVIDER_H_
+
+#include "components/metrics/metrics_provider.h"
+
+namespace component_updater {
+class ComponentUpdateService;
+}
+
+namespace metrics {
+
+class SystemProfileProto;
+
+// Adds information about installed components to the system profile.
+class ComponentMetricsProvider : public MetricsProvider {
+ public:
+ explicit ComponentMetricsProvider(
+ component_updater::ComponentUpdateService* component_update_service);
+ ~ComponentMetricsProvider() override;
+
+ // MetricsProvider:
+ void ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) override;
+
+ private:
+ component_updater::ComponentUpdateService* component_update_service_;
+
+ DISALLOW_COPY_AND_ASSIGN(ComponentMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_COMPONENT_METRICS_PROVIDER_H_
diff --git a/components/metrics/component_metrics_provider_unittest.cc b/components/metrics/component_metrics_provider_unittest.cc
new file mode 100644
index 0000000..2fc9cae
--- /dev/null
+++ b/components/metrics/component_metrics_provider_unittest.cc
@@ -0,0 +1,58 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/component_metrics_provider.h"
+
+#include "base/strings/utf_string_conversions.h"
+#include "base/version.h"
+#include "components/component_updater/mock_component_updater_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace metrics {
+
+class ComponentMetricsProviderTest : public testing::Test {
+ public:
+ ComponentMetricsProviderTest() {}
+ ~ComponentMetricsProviderTest() override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ComponentMetricsProviderTest);
+};
+
+using component_updater::ComponentInfo;
+
+TEST_F(ComponentMetricsProviderTest, ProvideComponentMetrics) {
+ std::vector<ComponentInfo> components = {
+ ComponentInfo(
+ "hfnkpimlhhgieaddgfemjhofmfblmnib",
+ "1.0846414bf2025bbc067b6fa5b61b16eda2269d8712b8fec0973b4c71fdc65ca0",
+ base::ASCIIToUTF16("name1"), base::Version("1.2.3.4")),
+ ComponentInfo(
+ "oimompecagnajdejgnnjijobebaeigek",
+ "1.adc9207a4a88ee98bf9ddf0330f35818386f1adc006bc8eee94dc59d43c0f5d6",
+ base::ASCIIToUTF16("name2"), base::Version("5.6.7.8")),
+ ComponentInfo(
+ "thiscomponentfilteredfromresults",
+ "1.b5268dc93e08d68d0be26bd8fbbb15c7b7f805cc06b4abd9d49381bc178e78cf",
+ base::ASCIIToUTF16("name3"), base::Version("9.9.9.9"))};
+ component_updater::MockComponentUpdateService service;
+ EXPECT_CALL(service, GetComponents()).WillOnce(testing::Return(components));
+ ComponentMetricsProvider component_provider(&service);
+ SystemProfileProto system_profile;
+ component_provider.ProvideSystemProfileMetrics(&system_profile);
+
+ EXPECT_EQ(2, system_profile.chrome_component_size());
+ EXPECT_EQ(SystemProfileProto_ComponentId_CRL_SET,
+ system_profile.chrome_component(0).component_id());
+ EXPECT_EQ("1.2.3.4", system_profile.chrome_component(0).version());
+ EXPECT_EQ(138821963u, system_profile.chrome_component(0).omaha_fingerprint());
+ EXPECT_EQ(SystemProfileProto_ComponentId_WIDEVINE_CDM,
+ system_profile.chrome_component(1).component_id());
+ EXPECT_EQ("5.6.7.8", system_profile.chrome_component(1).version());
+ EXPECT_EQ(2915639418u,
+ system_profile.chrome_component(1).omaha_fingerprint());
+}
+
+} // namespace metrics
diff --git a/components/metrics/daily_event.cc b/components/metrics/daily_event.cc
new file mode 100644
index 0000000..befc2eb
--- /dev/null
+++ b/components/metrics/daily_event.cc
@@ -0,0 +1,96 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/daily_event.h"
+
+#include <utility>
+
+#include "base/metrics/histogram.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+
+namespace metrics {
+
+namespace {
+
+void RecordIntervalTypeHistogram(const std::string& histogram_name,
+ DailyEvent::IntervalType type) {
+ const int num_types = static_cast<int>(DailyEvent::IntervalType::NUM_TYPES);
+ base::Histogram::FactoryGet(histogram_name, 1, num_types, num_types + 1,
+ base::HistogramBase::kUmaTargetedHistogramFlag)
+ ->Add(static_cast<int>(type));
+}
+
+} // namespace
+
+DailyEvent::Observer::Observer() {
+}
+
+DailyEvent::Observer::~Observer() {
+}
+
+DailyEvent::DailyEvent(PrefService* pref_service,
+ const char* pref_name,
+ const std::string& histogram_name)
+ : pref_service_(pref_service),
+ pref_name_(pref_name),
+ histogram_name_(histogram_name) {
+}
+
+DailyEvent::~DailyEvent() {
+}
+
+// static
+void DailyEvent::RegisterPref(PrefRegistrySimple* registry,
+ const char* pref_name) {
+ registry->RegisterInt64Pref(pref_name, 0);
+}
+
+void DailyEvent::AddObserver(std::unique_ptr<DailyEvent::Observer> observer) {
+ DVLOG(2) << "DailyEvent observer added.";
+ DCHECK(last_fired_.is_null());
+ observers_.push_back(std::move(observer));
+}
+
+void DailyEvent::CheckInterval() {
+ base::Time now = base::Time::Now();
+ if (last_fired_.is_null()) {
+ // The first time we call CheckInterval, we read the time stored in prefs.
+ last_fired_ = base::Time() + base::TimeDelta::FromMicroseconds(
+ pref_service_->GetInt64(pref_name_));
+
+ DVLOG(1) << "DailyEvent time loaded: " << last_fired_;
+ if (last_fired_.is_null()) {
+ DVLOG(1) << "DailyEvent first run.";
+ RecordIntervalTypeHistogram(histogram_name_, IntervalType::FIRST_RUN);
+ OnInterval(now, IntervalType::FIRST_RUN);
+ return;
+ }
+ }
+ int days_elapsed = (now - last_fired_).InDays();
+ if (days_elapsed >= 1) {
+ DVLOG(1) << "DailyEvent day elapsed.";
+ RecordIntervalTypeHistogram(histogram_name_, IntervalType::DAY_ELAPSED);
+ OnInterval(now, IntervalType::DAY_ELAPSED);
+ } else if (days_elapsed <= -1) {
+ // The "last fired" time is more than a day in the future, so the clock
+ // must have been changed.
+ DVLOG(1) << "DailyEvent clock change detected.";
+ RecordIntervalTypeHistogram(histogram_name_, IntervalType::CLOCK_CHANGED);
+ OnInterval(now, IntervalType::CLOCK_CHANGED);
+ }
+}
+
+void DailyEvent::OnInterval(base::Time now, IntervalType type) {
+ DCHECK(!now.is_null());
+ last_fired_ = now;
+ pref_service_->SetInt64(pref_name_,
+ last_fired_.since_origin().InMicroseconds());
+
+ for (auto it = observers_.begin(); it != observers_.end(); ++it) {
+ (*it)->OnDailyEvent(type);
+ }
+}
+
+} // namespace metrics
diff --git a/components/metrics/daily_event.h b/components/metrics/daily_event.h
new file mode 100644
index 0000000..326cad4
--- /dev/null
+++ b/components/metrics/daily_event.h
@@ -0,0 +1,102 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_DAILY_EVENT_H_
+#define COMPONENTS_METRICS_DAILY_EVENT_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/time/time.h"
+
+class PrefRegistrySimple;
+class PrefService;
+
+namespace metrics {
+
+// DailyEvent is used for throttling an event to about once per day, even if
+// the program is restarted more frequently. It is based on local machine
+// time, so it could be fired more often if the clock is changed.
+//
+// The service using the DailyEvent should first provide all of the Observers
+// for the interval, and then arrange for CheckInterval() to be called
+// periodically to test if the event should be fired.
+class DailyEvent {
+ public:
+ // Different reasons that Observer::OnDailyEvent() is called.
+ // This enum is used for histograms and must not be renumbered.
+ enum class IntervalType {
+ FIRST_RUN,
+ DAY_ELAPSED,
+ CLOCK_CHANGED,
+ NUM_TYPES,
+ };
+
+ // Observer receives notifications from a DailyEvent.
+ // Observers must be added before the DailyEvent begins checking time,
+ // and will be owned by the DailyEvent.
+ class Observer {
+ public:
+ Observer();
+ virtual ~Observer();
+
+ // Called when the daily event is fired.
+ virtual void OnDailyEvent(IntervalType type) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Observer);
+ };
+
+ // Constructs DailyEvent monitor which stores the time it last fired in the
+ // preference |pref_name|. |pref_name| should be registered by calling
+ // RegisterPref before using this object.
+ // Caller is responsible for ensuring that |pref_service| and |pref_name|
+ // outlive the DailyEvent.
+ // |histogram_name| is the name of the UMA metric which records when this
+ // interval fires, and should be registered in histograms.xml.
+ DailyEvent(PrefService* pref_service,
+ const char* pref_name,
+ const std::string& histogram_name);
+ ~DailyEvent();
+
+ // Adds an observer to be notified when a day elapses. All observers should
+ // be registered before the DailyEvent starts checking time.
+ void AddObserver(std::unique_ptr<Observer> observer);
+
+ // Checks if a day has elapsed. If it has, OnDailyEvent will be called on
+ // all observers.
+ void CheckInterval();
+
+ // Registers the preference used by this interval.
+ static void RegisterPref(PrefRegistrySimple* registry, const char* pref_name);
+
+ private:
+ // Handles an interval elapsing because of |type|.
+ void OnInterval(base::Time now, IntervalType type);
+
+ // A weak pointer to the PrefService object to read and write preferences
+ // from. Calling code should ensure this object continues to exist for the
+ // lifetime of the DailyEvent object.
+ PrefService* pref_service_;
+
+ // The name of the preference to store the last fired time in.
+ // Calling code should ensure this outlives the DailyEvent.
+ const char* pref_name_;
+
+ // The name of the histogram to record intervals.
+ std::string histogram_name_;
+
+ // A list of observers.
+ std::vector<std::unique_ptr<Observer>> observers_;
+
+ // The time that the daily event was last fired.
+ base::Time last_fired_;
+
+ DISALLOW_COPY_AND_ASSIGN(DailyEvent);
+};
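+
+// Usage sketch (editor's illustration, not part of the original change; the
+// pref name, histogram name, |registry|, |local_state| and MyObserver are
+// hypothetical):
+//
+//   metrics::DailyEvent::RegisterPref(registry, "my_component.daily_event");
+//   metrics::DailyEvent daily_event(local_state, "my_component.daily_event",
+//                                   "MyComponent.DailyEventInterval");
+//   daily_event.AddObserver(std::make_unique<MyObserver>());
+//   // Arrange for CheckInterval() to be called periodically, e.g. from a
+//   // repeating timer; observers fire at most about once per day.
+//   daily_event.CheckInterval();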
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_DAILY_EVENT_H_
diff --git a/components/metrics/daily_event_unittest.cc b/components/metrics/daily_event_unittest.cc
new file mode 100644
index 0000000..f9a6f52
--- /dev/null
+++ b/components/metrics/daily_event_unittest.cc
@@ -0,0 +1,98 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/daily_event.h"
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/optional.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+namespace {
+
+const char kTestPrefName[] = "TestPref";
+const char kTestMetricName[] = "TestMetric";
+
+class TestDailyObserver : public DailyEvent::Observer {
+ public:
+ TestDailyObserver() = default;
+
+ bool fired() const { return type_.has_value(); }
+ DailyEvent::IntervalType type() const { return type_.value(); }
+
+ void OnDailyEvent(DailyEvent::IntervalType type) override { type_ = type; }
+
+ void Reset() { type_ = {}; }
+
+ private:
+ // Last-received type, or unset if OnDailyEvent() hasn't been called.
+ base::Optional<DailyEvent::IntervalType> type_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestDailyObserver);
+};
+
+class DailyEventTest : public testing::Test {
+ public:
+ DailyEventTest() : event_(&prefs_, kTestPrefName, kTestMetricName) {
+ DailyEvent::RegisterPref(prefs_.registry(), kTestPrefName);
+ observer_ = new TestDailyObserver();
+ event_.AddObserver(base::WrapUnique(observer_));
+ }
+
+ protected:
+ TestingPrefServiceSimple prefs_;
+ TestDailyObserver* observer_;
+ DailyEvent event_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DailyEventTest);
+};
+
+} // namespace
+
+// The event should fire if the preference is not available.
+TEST_F(DailyEventTest, TestNewFires) {
+ event_.CheckInterval();
+ ASSERT_TRUE(observer_->fired());
+ EXPECT_EQ(DailyEvent::IntervalType::FIRST_RUN, observer_->type());
+}
+
+// The event should fire if the preference is more than a day old.
+TEST_F(DailyEventTest, TestOldFires) {
+ base::Time last_time = base::Time::Now() - base::TimeDelta::FromHours(25);
+ prefs_.SetInt64(kTestPrefName, last_time.since_origin().InMicroseconds());
+ event_.CheckInterval();
+ ASSERT_TRUE(observer_->fired());
+ EXPECT_EQ(DailyEvent::IntervalType::DAY_ELAPSED, observer_->type());
+}
+
+// The event should fire if the preference is more than a day in the future.
+TEST_F(DailyEventTest, TestFutureFires) {
+ base::Time last_time = base::Time::Now() + base::TimeDelta::FromHours(25);
+ prefs_.SetInt64(kTestPrefName, last_time.since_origin().InMicroseconds());
+ event_.CheckInterval();
+ ASSERT_TRUE(observer_->fired());
+ EXPECT_EQ(DailyEvent::IntervalType::CLOCK_CHANGED, observer_->type());
+}
+
+// The event should not fire if the preference is more recent than a day.
+TEST_F(DailyEventTest, TestRecentNotFired) {
+ base::Time last_time = base::Time::Now() - base::TimeDelta::FromMinutes(2);
+ prefs_.SetInt64(kTestPrefName, last_time.since_origin().InMicroseconds());
+ event_.CheckInterval();
+ EXPECT_FALSE(observer_->fired());
+}
+
+// The event should not fire if the preference is less than a day in the future.
+TEST_F(DailyEventTest, TestSoonNotFired) {
+ base::Time last_time = base::Time::Now() + base::TimeDelta::FromMinutes(2);
+ prefs_.SetInt64(kTestPrefName, last_time.since_origin().InMicroseconds());
+ event_.CheckInterval();
+ EXPECT_FALSE(observer_->fired());
+}
+
+} // namespace metrics
diff --git a/components/metrics/data_use_tracker.cc b/components/metrics/data_use_tracker.cc
new file mode 100644
index 0000000..61be02f
--- /dev/null
+++ b/components/metrics/data_use_tracker.cc
@@ -0,0 +1,185 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/data_use_tracker.h"
+
+#include <string>
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/scoped_user_pref_update.h"
+#include "components/variations/variations_associated_data.h"
+
+namespace metrics {
+
+namespace {
+
+// Default weekly quota and allowed UMA ratio for UMA log uploads for Android.
+// These defaults are not used on non-Android platforms, where |DataUseTracker|
+// is not initialized. Default values can be overridden by variation params.
+const int kDefaultUMAWeeklyQuotaBytes = 204800;
+const double kDefaultUMARatio = 0.05;
+
+} // namespace
+
+DataUseTracker::DataUseTracker(PrefService* local_state)
+ : local_state_(local_state) {}
+
+DataUseTracker::~DataUseTracker() {}
+
+// static
+std::unique_ptr<DataUseTracker> DataUseTracker::Create(
+ PrefService* local_state) {
+ std::unique_ptr<DataUseTracker> data_use_tracker;
+#if defined(OS_ANDROID)
+ data_use_tracker.reset(new DataUseTracker(local_state));
+#endif
+ return data_use_tracker;
+}
+
+// static
+void DataUseTracker::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterDictionaryPref(metrics::prefs::kUserCellDataUse);
+ registry->RegisterDictionaryPref(metrics::prefs::kUmaCellDataUse);
+}
+
+void DataUseTracker::UpdateMetricsUsagePrefs(const std::string& service_name,
+ int message_size,
+ bool is_cellular) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (!is_cellular)
+ return;
+
+ UpdateUsagePref(prefs::kUserCellDataUse, message_size);
+ // TODO(holte): Consider adding separate tracking for UKM.
+ if (service_name == "UMA" || service_name == "UKM")
+ UpdateUsagePref(prefs::kUmaCellDataUse, message_size);
+}
+
+bool DataUseTracker::ShouldUploadLogOnCellular(int log_bytes) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ RemoveExpiredEntries();
+
+ int uma_weekly_quota_bytes;
+ if (!GetUmaWeeklyQuota(&uma_weekly_quota_bytes))
+ return true;
+
+ int uma_total_data_use = ComputeTotalDataUse(prefs::kUmaCellDataUse);
+ int new_uma_total_data_use = log_bytes + uma_total_data_use;
+ // If the new log doesn't push the total UMA traffic above the allowed
+ // quota, then the log should be uploaded.
+ if (new_uma_total_data_use <= uma_weekly_quota_bytes)
+ return true;
+
+ double uma_ratio;
+ if (!GetUmaRatio(&uma_ratio))
+ return true;
+
+ int user_total_data_use = ComputeTotalDataUse(prefs::kUserCellDataUse);
+ // If, after adding the new log, the UMA ratio is still under the allowed
+ // ratio, then the log should be uploaded; otherwise it should not be.
+ return new_uma_total_data_use /
+ static_cast<double>(log_bytes + user_total_data_use) <=
+ uma_ratio;
+}
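+
+// Worked example (editor's note, using the values from the unit test for this
+// file): with a 200-byte weekly quota, a 0.05 ratio, 100 bytes of UMA use and
+// 400 bytes of user use already recorded, a 100-byte log is allowed
+// (100 + 100 <= 200), while a 150-byte log is rejected because 250 > 200 and
+// 250 / (150 + 400) ~= 0.45 exceeds the allowed ratio.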
+
+void DataUseTracker::UpdateUsagePref(const std::string& pref_name,
+ int message_size) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ DictionaryPrefUpdate pref_updater(local_state_, pref_name);
+ int todays_traffic = 0;
+ std::string todays_key = GetCurrentMeasurementDateAsString();
+
+ const base::DictionaryValue* user_pref_dict =
+ local_state_->GetDictionary(pref_name);
+ user_pref_dict->GetInteger(todays_key, &todays_traffic);
+ pref_updater->SetInteger(todays_key, todays_traffic + message_size);
+}
+
+void DataUseTracker::RemoveExpiredEntries() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ RemoveExpiredEntriesForPref(prefs::kUmaCellDataUse);
+ RemoveExpiredEntriesForPref(prefs::kUserCellDataUse);
+}
+
+void DataUseTracker::RemoveExpiredEntriesForPref(const std::string& pref_name) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ const base::DictionaryValue* user_pref_dict =
+ local_state_->GetDictionary(pref_name);
+ const base::Time current_date = GetCurrentMeasurementDate();
+ const base::Time week_ago = current_date - base::TimeDelta::FromDays(7);
+
+ base::DictionaryValue user_pref_new_dict;
+ for (base::DictionaryValue::Iterator it(*user_pref_dict); !it.IsAtEnd();
+ it.Advance()) {
+ base::Time key_date;
+ if (base::Time::FromUTCString(it.key().c_str(), &key_date) &&
+ key_date > week_ago)
+ user_pref_new_dict.Set(it.key(), it.value().CreateDeepCopy());
+ }
+ local_state_->Set(pref_name, user_pref_new_dict);
+}
+
+// Note: We compute total data use regardless of the current date. If the user
+// travels across time zones and the current date becomes earlier than the
+// latest date registered in prefs, we still count that use in the total,
+// since the user actually used that data.
+int DataUseTracker::ComputeTotalDataUse(const std::string& pref_name) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ int total_data_use = 0;
+ const base::DictionaryValue* pref_dict =
+ local_state_->GetDictionary(pref_name);
+ for (base::DictionaryValue::Iterator it(*pref_dict); !it.IsAtEnd();
+ it.Advance()) {
+ int value = 0;
+ it.value().GetAsInteger(&value);
+ total_data_use += value;
+ }
+ return total_data_use;
+}
+
+bool DataUseTracker::GetUmaWeeklyQuota(int* uma_weekly_quota_bytes) const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ std::string param_value_str = variations::GetVariationParamValue(
+ "UMA_EnableCellularLogUpload", "Uma_Quota");
+ if (param_value_str.empty())
+ *uma_weekly_quota_bytes = kDefaultUMAWeeklyQuotaBytes;
+ else
+ base::StringToInt(param_value_str, uma_weekly_quota_bytes);
+ return true;
+}
+
+bool DataUseTracker::GetUmaRatio(double* ratio) const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ std::string param_value_str = variations::GetVariationParamValue(
+ "UMA_EnableCellularLogUpload", "Uma_Ratio");
+ if (param_value_str.empty())
+ *ratio = kDefaultUMARatio;
+ else
+ base::StringToDouble(param_value_str, ratio);
+ return true;
+}
+
+base::Time DataUseTracker::GetCurrentMeasurementDate() const {
+ return base::Time::Now().LocalMidnight();
+}
+
+std::string DataUseTracker::GetCurrentMeasurementDateAsString() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ base::Time::Exploded today_exploded;
+ GetCurrentMeasurementDate().LocalExplode(&today_exploded);
+ return base::StringPrintf("%04d-%02d-%02d", today_exploded.year,
+ today_exploded.month, today_exploded.day_of_month);
+}
+
+} // namespace metrics
diff --git a/components/metrics/data_use_tracker.h b/components/metrics/data_use_tracker.h
new file mode 100644
index 0000000..104223e
--- /dev/null
+++ b/components/metrics/data_use_tracker.h
@@ -0,0 +1,88 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_DATA_USE_TRACKER_H_
+#define COMPONENTS_METRICS_DATA_USE_TRACKER_H_
+
+#include <memory>
+#include <string>
+
+#include "base/callback.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+#include "base/time/time.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+
+namespace metrics {
+
+typedef base::Callback<void(const std::string&, int, bool)>
+ UpdateUsagePrefCallbackType;
+
+// Records the data use of user traffic and UMA traffic in user prefs. Taking
+// those prefs into account, it can verify whether a given UMA log upload is
+// allowed.
+class DataUseTracker {
+ public:
+ explicit DataUseTracker(PrefService* local_state);
+ virtual ~DataUseTracker();
+
+ // Returns an instance of |DataUseTracker| with the provided |local_state| if
+ // the user's data use should be tracked, and a null pointer otherwise.
+ static std::unique_ptr<DataUseTracker> Create(PrefService* local_state);
+
+ // Registers data use prefs using provided |registry|.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ // Updates data usage tracking prefs with the specified values.
+ void UpdateMetricsUsagePrefs(const std::string& service_name,
+ int message_size,
+ bool is_cellular);
+
+ // Returns whether a log with provided |log_bytes| can be uploaded according
+ // to data use ratio and UMA quota provided by variations.
+ bool ShouldUploadLogOnCellular(int log_bytes);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(DataUseTrackerTest, CheckUpdateUsagePref);
+ FRIEND_TEST_ALL_PREFIXES(DataUseTrackerTest, CheckRemoveExpiredEntries);
+ FRIEND_TEST_ALL_PREFIXES(DataUseTrackerTest, CheckComputeTotalDataUse);
+ FRIEND_TEST_ALL_PREFIXES(DataUseTrackerTest, CheckCanUploadUMALog);
+
+ // Updates provided |pref_name| for a current date with the given message
+ // size.
+ void UpdateUsagePref(const std::string& pref_name, int message_size);
+
+ // Removes expired entries from all of the data use prefs.
+ void RemoveExpiredEntries();
+
+ // Removes entries from the given |pref_name| if they are more than 7 days
+ // old.
+ void RemoveExpiredEntriesForPref(const std::string& pref_name);
+
+ // Computes data usage according to all the entries in the given dictionary
+ // pref.
+ int ComputeTotalDataUse(const std::string& pref_name);
+
+ // Returns the weekly allowed quota for UMA data use.
+ virtual bool GetUmaWeeklyQuota(int* uma_weekly_quota_bytes) const;
+
+ // Returns the allowed ratio for UMA data use over overall data use.
+ virtual bool GetUmaRatio(double* ratio) const;
+
+ // Returns the current date for measurement.
+ virtual base::Time GetCurrentMeasurementDate() const;
+
+ // Returns the current date as a properly formatted string.
+ virtual std::string GetCurrentMeasurementDateAsString() const;
+
+ PrefService* local_state_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(DataUseTracker);
+};
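+
+// Usage sketch (editor's illustration, not part of the original change;
+// |registry|, |local_state| and |log_size_bytes| are assumptions):
+//
+//   metrics::DataUseTracker::RegisterPrefs(registry);
+//   std::unique_ptr<metrics::DataUseTracker> tracker =
+//       metrics::DataUseTracker::Create(local_state);  // Null off Android.
+//   if (tracker) {
+//     tracker->UpdateMetricsUsagePrefs("UMA", log_size_bytes,
+//                                      /*is_cellular=*/true);
+//     bool can_upload = tracker->ShouldUploadLogOnCellular(log_size_bytes);
+//   }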
+
+} // namespace metrics
+#endif // COMPONENTS_METRICS_DATA_USE_TRACKER_H_
diff --git a/components/metrics/data_use_tracker_unittest.cc b/components/metrics/data_use_tracker_unittest.cc
new file mode 100644
index 0000000..a271b5d
--- /dev/null
+++ b/components/metrics/data_use_tracker_unittest.cc
@@ -0,0 +1,203 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/data_use_tracker.h"
+
+#include "base/strings/stringprintf.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+namespace {
+
+const char kTodayStr[] = "2016-03-16";
+const char kYesterdayStr[] = "2016-03-15";
+const char kExpiredDateStr1[] = "2016-03-09";
+const char kExpiredDateStr2[] = "2016-03-01";
+
+class TestDataUsePrefService : public TestingPrefServiceSimple {
+ public:
+ TestDataUsePrefService() { DataUseTracker::RegisterPrefs(registry()); }
+
+ void ClearDataUsePrefs() {
+ ClearPref(metrics::prefs::kUserCellDataUse);
+ ClearPref(metrics::prefs::kUmaCellDataUse);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestDataUsePrefService);
+};
+
+class FakeDataUseTracker : public DataUseTracker {
+ public:
+ FakeDataUseTracker(PrefService* local_state) : DataUseTracker(local_state) {}
+
+ bool GetUmaWeeklyQuota(int* uma_weekly_quota_bytes) const override {
+ *uma_weekly_quota_bytes = 200;
+ return true;
+ }
+
+ bool GetUmaRatio(double* ratio) const override {
+ *ratio = 0.05;
+ return true;
+ }
+
+ base::Time GetCurrentMeasurementDate() const override {
+ base::Time today_for_test;
+ EXPECT_TRUE(base::Time::FromUTCString(kTodayStr, &today_for_test));
+ return today_for_test;
+ }
+
+ std::string GetCurrentMeasurementDateAsString() const override {
+ return kTodayStr;
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FakeDataUseTracker);
+};
+
+// Sets up data usage prefs with mock values so that UMA traffic is above the
+// allowed ratio.
+void SetPrefTestValuesOverRatio(PrefService* local_state) {
+ base::DictionaryValue user_pref_dict;
+ user_pref_dict.SetInteger(kTodayStr, 2 * 100);
+ user_pref_dict.SetInteger(kYesterdayStr, 2 * 100);
+ user_pref_dict.SetInteger(kExpiredDateStr1, 2 * 100);
+ user_pref_dict.SetInteger(kExpiredDateStr2, 2 * 100);
+ local_state->Set(prefs::kUserCellDataUse, user_pref_dict);
+
+ base::DictionaryValue uma_pref_dict;
+ uma_pref_dict.SetInteger(kTodayStr, 50);
+ uma_pref_dict.SetInteger(kYesterdayStr, 50);
+ uma_pref_dict.SetInteger(kExpiredDateStr1, 50);
+ uma_pref_dict.SetInteger(kExpiredDateStr2, 50);
+ local_state->Set(prefs::kUmaCellDataUse, uma_pref_dict);
+}
+
+// Sets up data usage prefs with mock values which can be valid.
+void SetPrefTestValuesValidRatio(PrefService* local_state) {
+ base::DictionaryValue user_pref_dict;
+ user_pref_dict.SetInteger(kTodayStr, 100 * 100);
+ user_pref_dict.SetInteger(kYesterdayStr, 100 * 100);
+ user_pref_dict.SetInteger(kExpiredDateStr1, 100 * 100);
+ user_pref_dict.SetInteger(kExpiredDateStr2, 100 * 100);
+ local_state->Set(prefs::kUserCellDataUse, user_pref_dict);
+
+ // Should be 4% of user traffic
+ base::DictionaryValue uma_pref_dict;
+ uma_pref_dict.SetInteger(kTodayStr, 4 * 100);
+ uma_pref_dict.SetInteger(kYesterdayStr, 4 * 100);
+ uma_pref_dict.SetInteger(kExpiredDateStr1, 4 * 100);
+ uma_pref_dict.SetInteger(kExpiredDateStr2, 4 * 100);
+ local_state->Set(prefs::kUmaCellDataUse, uma_pref_dict);
+}
+
+} // namespace
+
+TEST(DataUseTrackerTest, CheckUpdateUsagePref) {
+ TestDataUsePrefService local_state;
+ FakeDataUseTracker data_use_tracker(&local_state);
+ local_state.ClearDataUsePrefs();
+
+ int user_pref_value = 0;
+ int uma_pref_value = 0;
+
+ data_use_tracker.UpdateMetricsUsagePrefs("", 2 * 100, true);
+ local_state.GetDictionary(prefs::kUserCellDataUse)
+ ->GetInteger(kTodayStr, &user_pref_value);
+ EXPECT_EQ(2 * 100, user_pref_value);
+ local_state.GetDictionary(prefs::kUmaCellDataUse)
+ ->GetInteger(kTodayStr, &uma_pref_value);
+ EXPECT_EQ(0, uma_pref_value);
+
+ data_use_tracker.UpdateMetricsUsagePrefs("UMA", 100, true);
+ local_state.GetDictionary(prefs::kUserCellDataUse)
+ ->GetInteger(kTodayStr, &user_pref_value);
+ EXPECT_EQ(3 * 100, user_pref_value);
+ local_state.GetDictionary(prefs::kUmaCellDataUse)
+ ->GetInteger(kTodayStr, &uma_pref_value);
+ EXPECT_EQ(100, uma_pref_value);
+}
+
+TEST(DataUseTrackerTest, CheckRemoveExpiredEntries) {
+ TestDataUsePrefService local_state;
+ FakeDataUseTracker data_use_tracker(&local_state);
+ local_state.ClearDataUsePrefs();
+ SetPrefTestValuesOverRatio(&local_state);
+ data_use_tracker.RemoveExpiredEntries();
+
+ int user_pref_value = 0;
+ int uma_pref_value = 0;
+
+ local_state.GetDictionary(prefs::kUserCellDataUse)
+ ->GetInteger(kExpiredDateStr1, &user_pref_value);
+ EXPECT_EQ(0, user_pref_value);
+ local_state.GetDictionary(prefs::kUmaCellDataUse)
+ ->GetInteger(kExpiredDateStr1, &uma_pref_value);
+ EXPECT_EQ(0, uma_pref_value);
+
+ local_state.GetDictionary(prefs::kUserCellDataUse)
+ ->GetInteger(kExpiredDateStr2, &user_pref_value);
+ EXPECT_EQ(0, user_pref_value);
+ local_state.GetDictionary(prefs::kUmaCellDataUse)
+ ->GetInteger(kExpiredDateStr2, &uma_pref_value);
+ EXPECT_EQ(0, uma_pref_value);
+
+ local_state.GetDictionary(prefs::kUserCellDataUse)
+ ->GetInteger(kTodayStr, &user_pref_value);
+ EXPECT_EQ(2 * 100, user_pref_value);
+ local_state.GetDictionary(prefs::kUmaCellDataUse)
+ ->GetInteger(kTodayStr, &uma_pref_value);
+ EXPECT_EQ(50, uma_pref_value);
+
+ local_state.GetDictionary(prefs::kUserCellDataUse)
+ ->GetInteger(kYesterdayStr, &user_pref_value);
+ EXPECT_EQ(2 * 100, user_pref_value);
+ local_state.GetDictionary(prefs::kUmaCellDataUse)
+ ->GetInteger(kYesterdayStr, &uma_pref_value);
+ EXPECT_EQ(50, uma_pref_value);
+}
+
+TEST(DataUseTrackerTest, CheckComputeTotalDataUse) {
+ TestDataUsePrefService local_state;
+ FakeDataUseTracker data_use_tracker(&local_state);
+ local_state.ClearDataUsePrefs();
+ SetPrefTestValuesOverRatio(&local_state);
+
+ int user_data_use =
+ data_use_tracker.ComputeTotalDataUse(prefs::kUserCellDataUse);
+ EXPECT_EQ(8 * 100, user_data_use);
+ int uma_data_use =
+ data_use_tracker.ComputeTotalDataUse(prefs::kUmaCellDataUse);
+ EXPECT_EQ(4 * 50, uma_data_use);
+}
+
+TEST(DataUseTrackerTest, CheckShouldUploadLogOnCellular) {
+ TestDataUsePrefService local_state;
+ FakeDataUseTracker data_use_tracker(&local_state);
+ local_state.ClearDataUsePrefs();
+ SetPrefTestValuesOverRatio(&local_state);
+
+ bool can_upload = data_use_tracker.ShouldUploadLogOnCellular(50);
+ EXPECT_TRUE(can_upload);
+ can_upload = data_use_tracker.ShouldUploadLogOnCellular(100);
+ EXPECT_TRUE(can_upload);
+ can_upload = data_use_tracker.ShouldUploadLogOnCellular(150);
+ EXPECT_FALSE(can_upload);
+
+ local_state.ClearDataUsePrefs();
+ SetPrefTestValuesValidRatio(&local_state);
+ can_upload = data_use_tracker.ShouldUploadLogOnCellular(100);
+ EXPECT_TRUE(can_upload);
+ // The resulting UMA ratio is about 4.95%, still under the 5% limit.
+ can_upload = data_use_tracker.ShouldUploadLogOnCellular(200);
+ EXPECT_TRUE(can_upload);
+ can_upload = data_use_tracker.ShouldUploadLogOnCellular(300);
+ EXPECT_FALSE(can_upload);
+}
+
+} // namespace metrics
diff --git a/components/metrics/delegating_provider.cc b/components/metrics/delegating_provider.cc
new file mode 100644
index 0000000..2bb2013
--- /dev/null
+++ b/components/metrics/delegating_provider.cc
@@ -0,0 +1,112 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/delegating_provider.h"
+
+#include "base/barrier_closure.h"
+
+namespace metrics {
+
+DelegatingProvider::DelegatingProvider() = default;
+
+DelegatingProvider::~DelegatingProvider() = default;
+
+void DelegatingProvider::RegisterMetricsProvider(
+ std::unique_ptr<MetricsProvider> provider) {
+ metrics_providers_.push_back(std::move(provider));
+}
+
+const std::vector<std::unique_ptr<MetricsProvider>>&
+DelegatingProvider::GetProviders() {
+ return metrics_providers_;
+}
+
+void DelegatingProvider::Init() {
+ for (auto& provider : metrics_providers_)
+ provider->Init();
+}
+
+void DelegatingProvider::AsyncInit(const base::Closure& done_callback) {
+ base::Closure barrier =
+ base::BarrierClosure(metrics_providers_.size(), done_callback);
+ for (auto& provider : metrics_providers_) {
+ provider->AsyncInit(barrier);
+ }
+}
+
+void DelegatingProvider::OnDidCreateMetricsLog() {
+ for (auto& provider : metrics_providers_)
+ provider->OnDidCreateMetricsLog();
+}
+
+void DelegatingProvider::OnRecordingEnabled() {
+ for (auto& provider : metrics_providers_)
+ provider->OnRecordingEnabled();
+}
+
+void DelegatingProvider::OnRecordingDisabled() {
+ for (auto& provider : metrics_providers_)
+ provider->OnRecordingDisabled();
+}
+
+void DelegatingProvider::OnAppEnterBackground() {
+ for (auto& provider : metrics_providers_)
+ provider->OnAppEnterBackground();
+}
+
+bool DelegatingProvider::ProvideIndependentMetrics(
+ SystemProfileProto* system_profile_proto,
+ base::HistogramSnapshotManager* snapshot_manager) {
+ // These are collected separately for each provider.
+ NOTREACHED();
+ return false;
+}
+
+void DelegatingProvider::ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) {
+ for (auto& provider : metrics_providers_)
+ provider->ProvideSystemProfileMetrics(system_profile_proto);
+}
+
+bool DelegatingProvider::HasPreviousSessionData() {
+ // All providers are queried (rather than stopping after the first "true"
+ // response) in case they do any kind of setup work in preparation for
+ // the later call to RecordInitialHistogramSnapshots().
+ bool has_stability_metrics = false;
+ for (auto& provider : metrics_providers_)
+ has_stability_metrics |= provider->HasPreviousSessionData();
+
+ return has_stability_metrics;
+}
+
+void DelegatingProvider::ProvidePreviousSessionData(
+ ChromeUserMetricsExtension* uma_proto) {
+ for (const auto& provider : metrics_providers_)
+ provider->ProvidePreviousSessionData(uma_proto);
+}
+
+void DelegatingProvider::ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) {
+ for (const auto& provider : metrics_providers_)
+ provider->ProvideCurrentSessionData(uma_proto);
+}
+
+void DelegatingProvider::ClearSavedStabilityMetrics() {
+ for (auto& provider : metrics_providers_)
+ provider->ClearSavedStabilityMetrics();
+}
+
+void DelegatingProvider::RecordHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) {
+ for (auto& provider : metrics_providers_)
+ provider->RecordHistogramSnapshots(snapshot_manager);
+}
+
+void DelegatingProvider::RecordInitialHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) {
+ for (auto& provider : metrics_providers_)
+ provider->RecordInitialHistogramSnapshots(snapshot_manager);
+}
+
+} // namespace metrics
diff --git a/components/metrics/delegating_provider.h b/components/metrics/delegating_provider.h
new file mode 100644
index 0000000..55c7144
--- /dev/null
+++ b/components/metrics/delegating_provider.h
@@ -0,0 +1,60 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_DELEGATING_PROVIDER_H_
+#define COMPONENTS_METRICS_DELEGATING_PROVIDER_H_
+
+#include <memory>
+#include <vector>
+
+#include "components/metrics/metrics_provider.h"
+
+namespace metrics {
+
+// A MetricsProvider which manages a set of other MetricsProviders.
+// Calls to this provider's methods are forwarded to all of the registered
+// metrics providers, allowing the group to be handled as a single provider.
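+//
+// Illustrative usage sketch (SomeMetricsProvider stands in for any concrete
+// MetricsProvider subclass):
+//
+//   DelegatingProvider delegating_provider;
+//   delegating_provider.RegisterMetricsProvider(
+//       std::make_unique<SomeMetricsProvider>());
+//   delegating_provider.Init();
+//   // Each call now fans out to every registered provider:
+//   delegating_provider.OnRecordingEnabled();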
+class DelegatingProvider final : public MetricsProvider {
+ public:
+ DelegatingProvider();
+ ~DelegatingProvider() override;
+
+ // Registers an additional MetricsProvider to forward calls to.
+ void RegisterMetricsProvider(std::unique_ptr<MetricsProvider> delegate);
+
+ // Gets the list of registered providers.
+ const std::vector<std::unique_ptr<MetricsProvider>>& GetProviders();
+
+ // MetricsProvider:
+ void Init() override;
+ void AsyncInit(const base::Closure& done_callback) override;
+ void OnDidCreateMetricsLog() override;
+ void OnRecordingEnabled() override;
+ void OnRecordingDisabled() override;
+ void OnAppEnterBackground() override;
+ bool ProvideIndependentMetrics(
+ SystemProfileProto* system_profile_proto,
+ base::HistogramSnapshotManager* snapshot_manager) override;
+ void ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) override;
+ bool HasPreviousSessionData() override;
+ void ProvidePreviousSessionData(
+ ChromeUserMetricsExtension* uma_proto) override;
+ void ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) override;
+ void ClearSavedStabilityMetrics() override;
+ void RecordHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) override;
+ void RecordInitialHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) override;
+
+ private:
+ std::vector<std::unique_ptr<MetricsProvider>> metrics_providers_;
+
+ DISALLOW_COPY_AND_ASSIGN(DelegatingProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_DELEGATING_PROVIDER_H_
diff --git a/components/metrics/drive_metrics_provider.cc b/components/metrics/drive_metrics_provider.cc
new file mode 100644
index 0000000..bb92915
--- /dev/null
+++ b/components/metrics/drive_metrics_provider.cc
@@ -0,0 +1,100 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/drive_metrics_provider.h"
+
+#include "base/base_paths.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/path_service.h"
+#include "base/task/post_task.h"
+#include "base/task/task_traits.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/time/time.h"
+
+namespace metrics {
+
+DriveMetricsProvider::DriveMetricsProvider(int local_state_path_key)
+ : local_state_path_key_(local_state_path_key), weak_ptr_factory_(this) {}
+
+DriveMetricsProvider::~DriveMetricsProvider() {}
+
+void DriveMetricsProvider::ProvideSystemProfileMetrics(
+ metrics::SystemProfileProto* system_profile_proto) {
+ auto* hardware = system_profile_proto->mutable_hardware();
+ FillDriveMetrics(metrics_.app_drive, hardware->mutable_app_drive());
+ FillDriveMetrics(metrics_.user_data_drive,
+ hardware->mutable_user_data_drive());
+}
+
+void DriveMetricsProvider::AsyncInit(const base::Closure& done_callback) {
+ base::PostTaskWithTraitsAndReplyWithResult(
+ FROM_HERE,
+ {base::MayBlock(), base::TaskPriority::BEST_EFFORT,
+ base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN},
+ base::Bind(&DriveMetricsProvider::GetDriveMetricsOnBackgroundThread,
+ local_state_path_key_),
+ base::Bind(&DriveMetricsProvider::GotDriveMetrics,
+ weak_ptr_factory_.GetWeakPtr(), done_callback));
+}
+
+DriveMetricsProvider::SeekPenaltyResponse::SeekPenaltyResponse()
+ : success(false) {}
+
+// static
+DriveMetricsProvider::DriveMetrics
+DriveMetricsProvider::GetDriveMetricsOnBackgroundThread(
+ int local_state_path_key) {
+ base::ScopedBlockingCall scoped_blocking_call(base::BlockingType::WILL_BLOCK);
+
+ DriveMetricsProvider::DriveMetrics metrics;
+ QuerySeekPenalty(base::FILE_EXE, &metrics.app_drive);
+ QuerySeekPenalty(local_state_path_key, &metrics.user_data_drive);
+ return metrics;
+}
+
+// static
+void DriveMetricsProvider::QuerySeekPenalty(
+ int path_service_key,
+ DriveMetricsProvider::SeekPenaltyResponse* response) {
+ DCHECK(response);
+
+ base::FilePath path;
+ if (!base::PathService::Get(path_service_key, &path))
+ return;
+
+ base::TimeTicks start = base::TimeTicks::Now();
+
+ response->success = HasSeekPenalty(path, &response->has_seek_penalty);
+
+ UMA_HISTOGRAM_TIMES("Hardware.Drive.HasSeekPenalty_Time",
+ base::TimeTicks::Now() - start);
+ UMA_HISTOGRAM_BOOLEAN("Hardware.Drive.HasSeekPenalty_Success",
+ response->success);
+ if (response->success) {
+ UMA_HISTOGRAM_BOOLEAN("Hardware.Drive.HasSeekPenalty",
+ response->has_seek_penalty);
+ }
+}
+
+void DriveMetricsProvider::GotDriveMetrics(
+ const base::Closure& done_callback,
+ const DriveMetricsProvider::DriveMetrics& metrics) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ metrics_ = metrics;
+ done_callback.Run();
+}
+
+void DriveMetricsProvider::FillDriveMetrics(
+ const DriveMetricsProvider::SeekPenaltyResponse& response,
+ metrics::SystemProfileProto::Hardware::Drive* drive) {
+ if (response.success)
+ drive->set_has_seek_penalty(response.has_seek_penalty);
+}
+
+} // namespace metrics
diff --git a/components/metrics/drive_metrics_provider.h b/components/metrics/drive_metrics_provider.h
new file mode 100644
index 0000000..1da73ce
--- /dev/null
+++ b/components/metrics/drive_metrics_provider.h
@@ -0,0 +1,90 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_DRIVE_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_DRIVE_METRICS_PROVIDER_H_
+
+#include "base/callback_forward.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/sequence_checker.h"
+#include "components/metrics/metrics_provider.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace base {
+class FilePath;
+}
+
+namespace metrics {
+
+// Provides metrics about the local drives on a user's computer. Currently this
+// only checks whether they incur a seek-time penalty (i.e. whether they are
+// rotational disks rather than SSDs).
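+//
+// Illustrative registration sketch (kLocalStatePathKey and done_callback are
+// placeholders for embedder-supplied values):
+//
+//   auto drive_provider =
+//       std::make_unique<DriveMetricsProvider>(kLocalStatePathKey);
+//   drive_provider->AsyncInit(done_callback);  // Queries drives off-thread.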
+class DriveMetricsProvider : public metrics::MetricsProvider {
+ public:
+ explicit DriveMetricsProvider(int local_state_path_key);
+ ~DriveMetricsProvider() override;
+
+  // metrics::MetricsProvider:
+ void AsyncInit(const base::Closure& done_callback) override;
+ void ProvideSystemProfileMetrics(
+ metrics::SystemProfileProto* system_profile_proto) override;
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(DriveMetricsProviderTest, HasSeekPenalty);
+
+ // A response to querying a drive as to whether it incurs a seek penalty.
+ // |has_seek_penalty| is set if |success| is true.
+ struct SeekPenaltyResponse {
+ SeekPenaltyResponse();
+ bool success;
+ bool has_seek_penalty;
+ };
+
+ struct DriveMetrics {
+ SeekPenaltyResponse app_drive;
+ SeekPenaltyResponse user_data_drive;
+ };
+
+ // Determine whether the device that services |path| has a seek penalty.
+ // Returns false if it couldn't be determined (e.g., |path| doesn't exist).
+ static bool HasSeekPenalty(const base::FilePath& path,
+ bool* has_seek_penalty);
+
+ // Gather metrics about various drives. Should be run on a background thread.
+ static DriveMetrics GetDriveMetricsOnBackgroundThread(
+ int local_state_path_key);
+
+ // Tries to determine whether there is a penalty for seeking on the drive that
+ // hosts |path_service_key| (for example: the drive that holds "Local State").
+ static void QuerySeekPenalty(int path_service_key,
+ SeekPenaltyResponse* response);
+
+ // Called when metrics are done being gathered asynchronously.
+ // |done_callback| is the callback that should be called once all metrics are
+ // gathered.
+ void GotDriveMetrics(const base::Closure& done_callback,
+ const DriveMetrics& metrics);
+
+ // Fills |drive| with information from successful |response|s.
+ void FillDriveMetrics(const SeekPenaltyResponse& response,
+ metrics::SystemProfileProto::Hardware::Drive* drive);
+
+ // The key to give to base::PathService to obtain the path to local state
+ // (supplied by the embedder).
+ int local_state_path_key_;
+
+ // Information gathered about various important drives.
+ DriveMetrics metrics_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ base::WeakPtrFactory<DriveMetricsProvider> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(DriveMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_DRIVE_METRICS_PROVIDER_H_
diff --git a/components/metrics/drive_metrics_provider_android.cc b/components/metrics/drive_metrics_provider_android.cc
new file mode 100644
index 0000000..a653dd6
--- /dev/null
+++ b/components/metrics/drive_metrics_provider_android.cc
@@ -0,0 +1,16 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/drive_metrics_provider.h"
+
+namespace metrics {
+
+// static
+bool DriveMetricsProvider::HasSeekPenalty(const base::FilePath& path,
+ bool* has_seek_penalty) {
+ *has_seek_penalty = false;
+ return true;
+}
+
+} // namespace metrics
diff --git a/components/metrics/drive_metrics_provider_fuchsia.cc b/components/metrics/drive_metrics_provider_fuchsia.cc
new file mode 100644
index 0000000..165bc2d
--- /dev/null
+++ b/components/metrics/drive_metrics_provider_fuchsia.cc
@@ -0,0 +1,16 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/drive_metrics_provider.h"
+
+namespace metrics {
+
+// static
+bool DriveMetricsProvider::HasSeekPenalty(const base::FilePath& path,
+ bool* has_seek_penalty) {
+ *has_seek_penalty = false;
+ return true;
+}
+
+} // namespace metrics
diff --git a/components/metrics/drive_metrics_provider_ios.mm b/components/metrics/drive_metrics_provider_ios.mm
new file mode 100644
index 0000000..a653dd6
--- /dev/null
+++ b/components/metrics/drive_metrics_provider_ios.mm
@@ -0,0 +1,16 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/drive_metrics_provider.h"
+
+namespace metrics {
+
+// static
+bool DriveMetricsProvider::HasSeekPenalty(const base::FilePath& path,
+ bool* has_seek_penalty) {
+ *has_seek_penalty = false;
+ return true;
+}
+
+} // namespace metrics
diff --git a/components/metrics/drive_metrics_provider_linux.cc b/components/metrics/drive_metrics_provider_linux.cc
new file mode 100644
index 0000000..149405e
--- /dev/null
+++ b/components/metrics/drive_metrics_provider_linux.cc
@@ -0,0 +1,53 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/drive_metrics_provider.h"
+
+#include <linux/kdev_t.h> // For MAJOR()/MINOR().
+#include <sys/stat.h>
+#include <string>
+
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+
+namespace metrics {
+
+namespace {
+
+// See http://www.kernel.org/doc/Documentation/devices.txt for more info.
+const int kFirstScsiMajorNumber = 8;
+const int kPartitionsPerScsiDevice = 16;
+const char kRotationalFormat[] = "/sys/block/sd%c/queue/rotational";
+
+} // namespace
+
+// static
+bool DriveMetricsProvider::HasSeekPenalty(const base::FilePath& path,
+ bool* has_seek_penalty) {
+ base::File file(path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+ if (!file.IsValid())
+ return false;
+
+ struct stat path_stat;
+ int error = fstat(file.GetPlatformFile(), &path_stat);
+ if (error < 0 || MAJOR(path_stat.st_dev) != kFirstScsiMajorNumber) {
+ // TODO(dbeam): support more SCSI major numbers (e.g. /dev/sdq+) and LVM?
+ return false;
+ }
+
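+  // Each SCSI disk is assigned kPartitionsPerScsiDevice (16) minor numbers
+  // (the whole disk plus its partitions), so dividing the minor number by 16
+  // yields the disk index: 0 maps to sda, 1 to sdb, and so on.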
+ char sdX = 'a' + MINOR(path_stat.st_dev) / kPartitionsPerScsiDevice;
+ std::string rotational_path = base::StringPrintf(kRotationalFormat, sdX);
+ std::string rotates;
+ if (!base::ReadFileToString(base::FilePath(rotational_path), &rotates))
+ return false;
+
+ *has_seek_penalty = rotates.substr(0, 1) == "1";
+ return true;
+}
+
+} // namespace metrics
diff --git a/components/metrics/drive_metrics_provider_mac.mm b/components/metrics/drive_metrics_provider_mac.mm
new file mode 100644
index 0000000..84dbe68
--- /dev/null
+++ b/components/metrics/drive_metrics_provider_mac.mm
@@ -0,0 +1,77 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/drive_metrics_provider.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <DiskArbitration/DiskArbitration.h>
+#import <Foundation/Foundation.h>
+#include <IOKit/IOKitLib.h>
+#include <IOKit/storage/IOStorageDeviceCharacteristics.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+
+#include "base/files/file_path.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_ioobject.h"
+
+namespace metrics {
+
+// static
+bool DriveMetricsProvider::HasSeekPenalty(const base::FilePath& path,
+ bool* has_seek_penalty) {
+ struct stat path_stat;
+ if (stat(path.value().c_str(), &path_stat) < 0)
+ return false;
+
+ const char* dev_name = devname(path_stat.st_dev, S_IFBLK);
+ if (!dev_name)
+ return false;
+
+ std::string bsd_name("/dev/");
+ bsd_name.append(dev_name);
+
+ base::ScopedCFTypeRef<DASessionRef> session(
+ DASessionCreate(kCFAllocatorDefault));
+ if (!session)
+ return false;
+
+ base::ScopedCFTypeRef<DADiskRef> disk(
+ DADiskCreateFromBSDName(kCFAllocatorDefault, session, bsd_name.c_str()));
+ if (!disk)
+ return false;
+
+ base::mac::ScopedIOObject<io_object_t> io_media(DADiskCopyIOMedia(disk));
+ base::ScopedCFTypeRef<CFDictionaryRef> characteristics(
+ static_cast<CFDictionaryRef>(IORegistryEntrySearchCFProperty(
+ io_media, kIOServicePlane, CFSTR(kIOPropertyDeviceCharacteristicsKey),
+ kCFAllocatorDefault,
+ kIORegistryIterateRecursively | kIORegistryIterateParents)));
+ if (!characteristics)
+ return false;
+
+ CFStringRef type_ref = base::mac::GetValueFromDictionary<CFStringRef>(
+ characteristics, CFSTR(kIOPropertyMediumTypeKey));
+ if (!type_ref)
+ return false;
+
+ NSString* type = base::mac::CFToNSCast(type_ref);
+ if ([type isEqualToString:@kIOPropertyMediumTypeRotationalKey]) {
+ *has_seek_penalty = true;
+ return true;
+ }
+ if ([type isEqualToString:@kIOPropertyMediumTypeSolidStateKey]) {
+ *has_seek_penalty = false;
+ return true;
+ }
+
+ // TODO(dbeam): should I look for these Rotational/Solid State keys in
+ // |characteristics|? What if I find device characteristic but there's no
+ // type? Assume rotational?
+ return false;
+}
+
+} // namespace metrics
diff --git a/components/metrics/drive_metrics_provider_unittest.cc b/components/metrics/drive_metrics_provider_unittest.cc
new file mode 100644
index 0000000..142faf7
--- /dev/null
+++ b/components/metrics/drive_metrics_provider_unittest.cc
@@ -0,0 +1,20 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/drive_metrics_provider.h"
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+TEST(DriveMetricsProviderTest, HasSeekPenalty) {
+ base::FilePath tmp_path;
+ ASSERT_TRUE(base::GetTempDir(&tmp_path));
+ bool unused;
+ DriveMetricsProvider::HasSeekPenalty(tmp_path, &unused);
+}
+
+} // namespace metrics
diff --git a/components/metrics/drive_metrics_provider_win.cc b/components/metrics/drive_metrics_provider_win.cc
new file mode 100644
index 0000000..6c1334e
--- /dev/null
+++ b/components/metrics/drive_metrics_provider_win.cc
@@ -0,0 +1,46 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/drive_metrics_provider.h"
+
+#include <windows.h>
+#include <winioctl.h>
+#include <vector>
+
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/strings/stringprintf.h"
+
+namespace metrics {
+
+// static
+bool DriveMetricsProvider::HasSeekPenalty(const base::FilePath& path,
+ bool* has_seek_penalty) {
+ std::vector<base::FilePath::StringType> components;
+ path.GetComponents(&components);
+
+ base::File volume(base::FilePath(L"\\\\.\\" + components[0]),
+ base::File::FLAG_OPEN);
+ if (!volume.IsValid())
+ return false;
+
+ STORAGE_PROPERTY_QUERY query = {};
+ query.QueryType = PropertyStandardQuery;
+ query.PropertyId = StorageDeviceSeekPenaltyProperty;
+
+ DEVICE_SEEK_PENALTY_DESCRIPTOR result;
+ DWORD bytes_returned;
+
+ BOOL success = DeviceIoControl(
+ volume.GetPlatformFile(), IOCTL_STORAGE_QUERY_PROPERTY, &query,
+ sizeof(query), &result, sizeof(result), &bytes_returned, nullptr);
+
+ if (success == FALSE || bytes_returned < sizeof(result))
+ return false;
+
+ *has_seek_penalty = result.IncursSeekPenalty != FALSE;
+ return true;
+}
+
+} // namespace metrics
diff --git a/components/metrics/enabled_state_provider.cc b/components/metrics/enabled_state_provider.cc
new file mode 100644
index 0000000..31f2964
--- /dev/null
+++ b/components/metrics/enabled_state_provider.cc
@@ -0,0 +1,13 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/enabled_state_provider.h"
+
+namespace metrics {
+
+bool EnabledStateProvider::IsReportingEnabled() const {
+ return IsConsentGiven();
+}
+
+} // namespace metrics
diff --git a/components/metrics/enabled_state_provider.h b/components/metrics/enabled_state_provider.h
new file mode 100644
index 0000000..266194d
--- /dev/null
+++ b/components/metrics/enabled_state_provider.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_ENABLED_STATE_PROVIDER_H_
+#define COMPONENTS_METRICS_ENABLED_STATE_PROVIDER_H_
+
+namespace metrics {
+
+// An interface that indicates whether the user has given consent and whether
+// metrics collection and reporting should be enabled.
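+//
+// A minimal embedder implementation might look like this sketch
+// (MyEnabledStateProvider and user_opted_in_ are hypothetical names):
+//
+//   class MyEnabledStateProvider : public metrics::EnabledStateProvider {
+//    public:
+//     bool IsConsentGiven() const override { return user_opted_in_; }
+//
+//    private:
+//     bool user_opted_in_ = false;
+//   };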
+class EnabledStateProvider {
+ public:
+ virtual ~EnabledStateProvider() {}
+
+  // Returns whether the user has provided consent to collect and report
+  // metrics.
+ virtual bool IsConsentGiven() const = 0;
+
+  // Returns whether metrics collection and reporting should be enabled. This
+  // should depend on consent being given.
+ virtual bool IsReportingEnabled() const;
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_ENABLED_STATE_PROVIDER_H_
diff --git a/components/metrics/environment_recorder.cc b/components/metrics/environment_recorder.cc
new file mode 100644
index 0000000..6391f61
--- /dev/null
+++ b/components/metrics/environment_recorder.cc
@@ -0,0 +1,96 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/environment_recorder.h"
+
+#include "base/base64.h"
+#include "base/sha1.h"
+#include "base/strings/string_number_conversions.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace metrics {
+
+namespace {
+
+// Computes a SHA-1 hash of |data| and returns it as a hex string.
+std::string ComputeSHA1(const std::string& data) {
+ const std::string sha1 = base::SHA1HashString(data);
+ return base::HexEncode(sha1.data(), sha1.size());
+}
+
+} // namespace
+
+EnvironmentRecorder::EnvironmentRecorder(PrefService* local_state)
+ : local_state_(local_state) {}
+
+EnvironmentRecorder::~EnvironmentRecorder() = default;
+
+std::string EnvironmentRecorder::SerializeAndRecordEnvironmentToPrefs(
+ const SystemProfileProto& system_profile) {
+ std::string serialized_system_profile;
+ std::string base64_system_profile;
+ if (system_profile.SerializeToString(&serialized_system_profile)) {
+ // Persist the system profile to disk. In the event of an unclean shutdown,
+ // it will be used as part of the initial stability report.
+ base::Base64Encode(serialized_system_profile, &base64_system_profile);
+ local_state_->SetString(prefs::kStabilitySavedSystemProfile,
+ base64_system_profile);
+ local_state_->SetString(prefs::kStabilitySavedSystemProfileHash,
+ ComputeSHA1(serialized_system_profile));
+ }
+
+ return serialized_system_profile;
+}
+
+bool EnvironmentRecorder::LoadEnvironmentFromPrefs(
+ SystemProfileProto* system_profile) {
+ DCHECK(system_profile);
+
+ const std::string base64_system_profile =
+ local_state_->GetString(prefs::kStabilitySavedSystemProfile);
+ if (base64_system_profile.empty())
+ return false;
+ const std::string system_profile_hash =
+ local_state_->GetString(prefs::kStabilitySavedSystemProfileHash);
+
+ std::string serialized_system_profile;
+ return base::Base64Decode(base64_system_profile,
+ &serialized_system_profile) &&
+ ComputeSHA1(serialized_system_profile) == system_profile_hash &&
+ system_profile->ParseFromString(serialized_system_profile);
+}
+
+void EnvironmentRecorder::ClearEnvironmentFromPrefs() {
+ local_state_->ClearPref(prefs::kStabilitySavedSystemProfile);
+ local_state_->ClearPref(prefs::kStabilitySavedSystemProfileHash);
+}
+
+int64_t EnvironmentRecorder::GetLastBuildtime() {
+ return local_state_->GetInt64(prefs::kStabilityStatsBuildTime);
+}
+
+std::string EnvironmentRecorder::GetLastVersion() {
+ return local_state_->GetString(prefs::kStabilityStatsVersion);
+}
+
+void EnvironmentRecorder::SetBuildtimeAndVersion(int64_t buildtime,
+ const std::string& version) {
+ local_state_->SetInt64(prefs::kStabilityStatsBuildTime, buildtime);
+ local_state_->SetString(prefs::kStabilityStatsVersion, version);
+}
+
+// static
+void EnvironmentRecorder::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterStringPref(prefs::kStabilitySavedSystemProfile,
+ std::string());
+ registry->RegisterStringPref(prefs::kStabilitySavedSystemProfileHash,
+ std::string());
+ registry->RegisterStringPref(prefs::kStabilityStatsVersion, std::string());
+ registry->RegisterInt64Pref(prefs::kStabilityStatsBuildTime, 0);
+}
+
+} // namespace metrics
diff --git a/components/metrics/environment_recorder.h b/components/metrics/environment_recorder.h
new file mode 100644
index 0000000..0042d2e
--- /dev/null
+++ b/components/metrics/environment_recorder.h
@@ -0,0 +1,59 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_ENVIRONMENT_RECORDER_H_
+#define COMPONENTS_METRICS_ENVIRONMENT_RECORDER_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/macros.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace metrics {
+
+class SystemProfileProto;
+
+// Stores system profile information to prefs for creating stability logs
+// in the next launch of chrome, and reads data from previous launches.
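+//
+// Illustrative usage sketch (|local_state| and |current_system_profile| are
+// placeholders for embedder-owned objects):
+//
+//   EnvironmentRecorder recorder(local_state);
+//   recorder.SerializeAndRecordEnvironmentToPrefs(current_system_profile);
+//
+//   // On a later launch, recover the profile for an initial stability log:
+//   SystemProfileProto previous_profile;
+//   if (recorder.LoadEnvironmentFromPrefs(&previous_profile)) {
+//     // Use |previous_profile| when building the stability log.
+//   }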
+class EnvironmentRecorder {
+ public:
+ explicit EnvironmentRecorder(PrefService* local_state);
+ ~EnvironmentRecorder();
+
+ // Serializes the system profile and records it in prefs for the next
+ // session. Returns the uncompressed serialized proto for passing to crash
+ // reports, or the empty string if the proto can't be serialized.
+ std::string SerializeAndRecordEnvironmentToPrefs(
+ const SystemProfileProto& system_profile);
+
+ // Loads the system_profile data stored in a previous chrome session, and
+ // stores it in the |system_profile| object.
+ // Returns true iff a system profile was successfully read.
+ bool LoadEnvironmentFromPrefs(SystemProfileProto* system_profile);
+
+ // Deletes system profile data from prefs.
+ void ClearEnvironmentFromPrefs();
+
+ // Stores the buildtime of the current binary and version in prefs.
+ void SetBuildtimeAndVersion(int64_t buildtime, const std::string& version);
+
+ // Gets the buildtime stored in prefs.
+ int64_t GetLastBuildtime();
+
+ // Gets the version stored in prefs.
+ std::string GetLastVersion();
+
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ private:
+ PrefService* local_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(EnvironmentRecorder);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_ENVIRONMENT_RECORDER_H_
diff --git a/components/metrics/environment_recorder_unittest.cc b/components/metrics/environment_recorder_unittest.cc
new file mode 100644
index 0000000..14c6cb7
--- /dev/null
+++ b/components/metrics/environment_recorder_unittest.cc
@@ -0,0 +1,75 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/environment_recorder.h"
+
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace metrics {
+
+class EnvironmentRecorderTest : public testing::Test {
+ public:
+ EnvironmentRecorderTest() {
+ EnvironmentRecorder::RegisterPrefs(prefs_.registry());
+ }
+
+ ~EnvironmentRecorderTest() override {}
+
+ protected:
+ TestingPrefServiceSimple prefs_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(EnvironmentRecorderTest);
+};
+
+TEST_F(EnvironmentRecorderTest, LoadEnvironmentFromPrefs) {
+ const char* kSystemProfilePref = prefs::kStabilitySavedSystemProfile;
+ const char* kSystemProfileHashPref = prefs::kStabilitySavedSystemProfileHash;
+
+ // The pref value is empty, so loading it from prefs should fail.
+ {
+ EnvironmentRecorder recorder(&prefs_);
+ SystemProfileProto system_profile;
+ EXPECT_FALSE(recorder.LoadEnvironmentFromPrefs(&system_profile));
+ EXPECT_FALSE(system_profile.has_app_version());
+ }
+
+  // Serialize and record a system profile and check that the prefs are set.
+ {
+ EnvironmentRecorder recorder(&prefs_);
+ SystemProfileProto system_profile;
+ system_profile.set_app_version("bogus version");
+ std::string serialized_profile =
+ recorder.SerializeAndRecordEnvironmentToPrefs(system_profile);
+ EXPECT_FALSE(serialized_profile.empty());
+ EXPECT_FALSE(prefs_.GetString(kSystemProfilePref).empty());
+ EXPECT_FALSE(prefs_.GetString(kSystemProfileHashPref).empty());
+ }
+
+ // Load it and check that it has the right value.
+ {
+ EnvironmentRecorder recorder(&prefs_);
+ SystemProfileProto system_profile;
+ EXPECT_TRUE(recorder.LoadEnvironmentFromPrefs(&system_profile));
+ EXPECT_EQ("bogus version", system_profile.app_version());
+ // Ensure that the call did not clear the prefs.
+ EXPECT_FALSE(prefs_.GetString(kSystemProfilePref).empty());
+ EXPECT_FALSE(prefs_.GetString(kSystemProfileHashPref).empty());
+ }
+
+ // Ensure that a non-matching hash results in the pref being invalid.
+ {
+ // Set the hash to a bad value.
+ prefs_.SetString(kSystemProfileHashPref, "deadbeef");
+ EnvironmentRecorder recorder(&prefs_);
+ SystemProfileProto system_profile;
+ EXPECT_FALSE(recorder.LoadEnvironmentFromPrefs(&system_profile));
+ EXPECT_FALSE(system_profile.has_app_version());
+ }
+}
+
+} // namespace metrics
diff --git a/components/metrics/execution_phase.cc b/components/metrics/execution_phase.cc
new file mode 100644
index 0000000..992ef82
--- /dev/null
+++ b/components/metrics/execution_phase.cc
@@ -0,0 +1,69 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/execution_phase.h"
+
+#include "build/build_config.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+
+#if defined(OS_WIN)
+#include "components/browser_watcher/stability_data_names.h"
+#include "components/browser_watcher/stability_debugging.h"
+#endif // defined(OS_WIN)
+
+namespace metrics {
+
+ExecutionPhaseManager::ExecutionPhaseManager(PrefService* local_state)
+ : local_state_(local_state) {}
+
+ExecutionPhaseManager::~ExecutionPhaseManager() {}
+
+// static
+void ExecutionPhaseManager::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterIntegerPref(
+ prefs::kStabilityExecutionPhase,
+ static_cast<int>(ExecutionPhase::UNINITIALIZED_PHASE));
+}
+
+// static
+ExecutionPhase ExecutionPhaseManager::execution_phase_ =
+ ExecutionPhase::UNINITIALIZED_PHASE;
+
+void ExecutionPhaseManager::SetExecutionPhase(ExecutionPhase execution_phase) {
+ DCHECK(execution_phase != ExecutionPhase::START_METRICS_RECORDING ||
+ execution_phase_ == ExecutionPhase::UNINITIALIZED_PHASE);
+ execution_phase_ = execution_phase;
+ local_state_->SetInteger(prefs::kStabilityExecutionPhase,
+ static_cast<int>(execution_phase_));
+#if defined(OS_WIN)
+ browser_watcher::SetStabilityDataInt(
+ browser_watcher::kStabilityExecutionPhase,
+ static_cast<int>(execution_phase_));
+#endif // defined(OS_WIN)
+}
+
+ExecutionPhase ExecutionPhaseManager::GetExecutionPhase() {
+ // TODO(rtenneti): On windows, consider saving/getting execution_phase from
+ // the registry.
+ return static_cast<ExecutionPhase>(
+ local_state_->GetInteger(prefs::kStabilityExecutionPhase));
+}
+
+#if defined(OS_ANDROID) || defined(OS_IOS)
+void ExecutionPhaseManager::OnAppEnterBackground() {
+ // Note: the in-memory ExecutionPhaseManager::execution_phase_ is not updated.
+ local_state_->SetInteger(prefs::kStabilityExecutionPhase,
+ static_cast<int>(ExecutionPhase::SHUTDOWN_COMPLETE));
+}
+
+void ExecutionPhaseManager::OnAppEnterForeground() {
+  // Restore the prefs value altered by OnAppEnterBackground().
+ local_state_->SetInteger(prefs::kStabilityExecutionPhase,
+ static_cast<int>(execution_phase_));
+}
+#endif // defined(OS_ANDROID) || defined(OS_IOS)
+
+} // namespace metrics
diff --git a/components/metrics/execution_phase.h b/components/metrics/execution_phase.h
new file mode 100644
index 0000000..b158923
--- /dev/null
+++ b/components/metrics/execution_phase.h
@@ -0,0 +1,55 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_EXECUTION_PHASE_H_
+#define COMPONENTS_METRICS_EXECUTION_PHASE_H_
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace metrics {
+
+enum class ExecutionPhase {
+ UNINITIALIZED_PHASE = 0,
+ START_METRICS_RECORDING = 100,
+ CREATE_PROFILE = 200,
+ STARTUP_TIMEBOMB_ARM = 300,
+ THREAD_WATCHER_START = 400,
+ MAIN_MESSAGE_LOOP_RUN = 500,
+ SHUTDOWN_TIMEBOMB_ARM = 600,
+ SHUTDOWN_COMPLETE = 700,
+};
+
+// Helper class for managing ExecutionPhase state in prefs and memory.
+// It's safe to construct temporary objects to perform these operations.
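+//
+// Illustrative usage sketch (|local_state| is the embedder-owned PrefService):
+//
+//   ExecutionPhaseManager(local_state)
+//       .SetExecutionPhase(ExecutionPhase::MAIN_MESSAGE_LOOP_RUN);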
+class ExecutionPhaseManager {
+ public:
+ explicit ExecutionPhaseManager(PrefService* local_state);
+ ~ExecutionPhaseManager();
+
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ void SetExecutionPhase(ExecutionPhase execution_phase);
+ ExecutionPhase GetExecutionPhase();
+
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ void OnAppEnterBackground();
+ void OnAppEnterForeground();
+#endif // defined(OS_ANDROID) || defined(OS_IOS)
+
+ private:
+ // Execution phase the browser is in.
+ static ExecutionPhase execution_phase_;
+
+ PrefService* local_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExecutionPhaseManager);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_EXECUTION_PHASE_H_
diff --git a/components/metrics/expired_histogram_util.cc b/components/metrics/expired_histogram_util.cc
new file mode 100644
index 0000000..17844e7
--- /dev/null
+++ b/components/metrics/expired_histogram_util.cc
@@ -0,0 +1,34 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/expired_histogram_util.h"
+
+#include "base/feature_list.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/metrics/statistics_recorder.h"
+#include "components/metrics/expired_histograms_checker.h"
+
+namespace metrics {
+namespace {
+
+const base::Feature kExpiredHistogramLogicFeature{
+ "ExpiredHistogramLogic", base::FEATURE_DISABLED_BY_DEFAULT};
+
+const base::FeatureParam<std::string> kWhitelistParam{
+ &kExpiredHistogramLogicFeature, "whitelist", ""};
+
+} // namespace
+
+void EnableExpiryChecker(const uint64_t* expired_histograms_hashes,
+ size_t num_expired_histograms) {
+ DCHECK(base::FeatureList::GetInstance());
+ if (base::FeatureList::IsEnabled(kExpiredHistogramLogicFeature)) {
+ base::StatisticsRecorder::SetRecordChecker(
+ std::make_unique<ExpiredHistogramsChecker>(expired_histograms_hashes,
+ num_expired_histograms,
+ kWhitelistParam.Get()));
+ }
+}
+
+} // namespace metrics
\ No newline at end of file
diff --git a/components/metrics/expired_histogram_util.h b/components/metrics/expired_histogram_util.h
new file mode 100644
index 0000000..cf6c455
--- /dev/null
+++ b/components/metrics/expired_histogram_util.h
@@ -0,0 +1,21 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_EXPIRED_HISTOGRAM_UTIL_H_
+#define COMPONENTS_METRICS_EXPIRED_HISTOGRAM_UTIL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace metrics {
+
+// Enables the histogram expiry checker if it is enabled by field trial.
+// Histogram expiry is disabled by default so that unit tests don't fail
+// unexpectedly when a histogram expires.
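+//
+// Illustrative call sketch (kExpiredHashes is a placeholder for the sorted
+// hash array that is normally generated at build time; must be called after
+// base::FeatureList has been initialized, per the DCHECK in the .cc):
+//
+//   static const uint64_t kExpiredHashes[] = {0x1234567890abcdefULL};
+//   metrics::EnableExpiryChecker(kExpiredHashes, arraysize(kExpiredHashes));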
+void EnableExpiryChecker(const uint64_t* expired_histograms_hashes,
+ size_t num_expired_histograms);
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_EXPIRED_HISTOGRAM_UTIL_H_
\ No newline at end of file
diff --git a/components/metrics/expired_histograms_checker.cc b/components/metrics/expired_histograms_checker.cc
new file mode 100644
index 0000000..d1d3ecb
--- /dev/null
+++ b/components/metrics/expired_histograms_checker.cc
@@ -0,0 +1,41 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/expired_histograms_checker.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/stl_util.h"
+#include "base/strings/string_split.h"
+
+namespace metrics {
+
+ExpiredHistogramsChecker::ExpiredHistogramsChecker(
+ const uint64_t* array,
+ size_t size,
+ const std::string& whitelist_str)
+ : array_(array), size_(size) {
+ InitWhitelist(whitelist_str);
+}
+
+ExpiredHistogramsChecker::~ExpiredHistogramsChecker() {}
+
+bool ExpiredHistogramsChecker::ShouldRecord(uint64_t histogram_hash) const {
+  // If the histogram is whitelisted, it should always be recorded.
+ if (base::ContainsKey(whitelist_, histogram_hash))
+ return true;
+ return !std::binary_search(array_, array_ + size_, histogram_hash);
+}
+
+void ExpiredHistogramsChecker::InitWhitelist(const std::string& whitelist_str) {
+ std::vector<base::StringPiece> whitelist_names = base::SplitStringPiece(
+ whitelist_str, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+ for (base::StringPiece name : whitelist_names)
+ whitelist_.insert(base::HashMetricName(name));
+}
+
+} // namespace metrics
diff --git a/components/metrics/expired_histograms_checker.h b/components/metrics/expired_histograms_checker.h
new file mode 100644
index 0000000..b3ca4a7
--- /dev/null
+++ b/components/metrics/expired_histograms_checker.h
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_EXPIRED_HISTOGRAMS_CHECKER_H_
+#define COMPONENTS_METRICS_EXPIRED_HISTOGRAMS_CHECKER_H_
+
+#include <stdint.h>
+#include <set>
+
+#include "base/macros.h"
+#include "base/metrics/record_histogram_checker.h"
+#include "base/strings/string_piece.h"
+
+namespace metrics {
+
+// ExpiredHistogramsChecker implements the RecordHistogramChecker interface
+// to avoid recording expired metrics.
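+//
+// Illustrative sketch (histogram names are placeholders; hashes come from
+// base::HashMetricName, as in the unit tests):
+//
+//   const uint64_t expired[] = {base::HashMetricName("Old.Histogram")};
+//   ExpiredHistogramsChecker checker(expired, arraysize(expired),
+//                                    /*whitelist_str=*/"");
+//   bool record = checker.ShouldRecord(base::HashMetricName("New.Histogram"));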
+class ExpiredHistogramsChecker final : public base::RecordHistogramChecker {
+ public:
+  // Takes an array of histogram hashes sorted in nondecreasing order, the size
+  // of that array, and a list of whitelisted histogram names concatenated into
+  // a comma-separated string.
+ ExpiredHistogramsChecker(const uint64_t* array,
+ size_t size,
+ const std::string& whitelist_str);
+ ~ExpiredHistogramsChecker() override;
+
+  // Returns true if the histogram with the given |histogram_hash| should be
+  // recorded, i.e. if it is whitelisted or has not expired.
+ bool ShouldRecord(uint64_t histogram_hash) const override;
+
+ private:
+ // Initializes the |whitelist_| array of histogram hashes that should be
+ // recorded regardless of their expiration.
+ void InitWhitelist(const std::string& whitelist_str);
+
+ // Array of expired histogram hashes.
+ const uint64_t* const array_;
+
+ // Size of the |array_|.
+ const size_t size_;
+
+  // Set of histogram hashes that should be recorded regardless of expiration.
+ std::set<uint64_t> whitelist_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExpiredHistogramsChecker);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_EXPIRED_HISTOGRAMS_CHECKER_H_
diff --git a/components/metrics/expired_histograms_checker_unittest.cc b/components/metrics/expired_histograms_checker_unittest.cc
new file mode 100644
index 0000000..13aa77d
--- /dev/null
+++ b/components/metrics/expired_histograms_checker_unittest.cc
@@ -0,0 +1,40 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/expired_histograms_checker.h"
+
+#include "base/metrics/metrics_hashes.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+TEST(ExpiredHistogramsCheckerTests, BasicTest) {
+ uint64_t expired_hashes[] = {1, 2, 3};
+ size_t size = 3;
+ std::string whitelist_str = "";
+ ExpiredHistogramsChecker checker(expired_hashes, size, whitelist_str);
+
+ EXPECT_TRUE(checker.ShouldRecord(0));
+ EXPECT_FALSE(checker.ShouldRecord(3));
+}
+
+TEST(ExpiredHistogramsCheckerTests, WhitelistTest) {
+ std::string hist1 = "hist1";
+ std::string hist2 = "hist2";
+ std::string hist3 = "hist3";
+ std::string hist4 = "hist4";
+
+ uint64_t expired_hashes[] = {base::HashMetricName(hist1),
+ base::HashMetricName(hist2)};
+ size_t size = 2;
+ std::string whitelist_str = hist2 + "," + hist4;
+ ExpiredHistogramsChecker checker(expired_hashes, size, whitelist_str);
+
+ EXPECT_FALSE(checker.ShouldRecord(base::HashMetricName(hist1)));
+ EXPECT_TRUE(checker.ShouldRecord(base::HashMetricName(hist2)));
+ EXPECT_TRUE(checker.ShouldRecord(base::HashMetricName(hist3)));
+ EXPECT_TRUE(checker.ShouldRecord(base::HashMetricName(hist4)));
+}
+
+} // namespace metrics
\ No newline at end of file
diff --git a/components/metrics/field_trials_provider.cc b/components/metrics/field_trials_provider.cc
new file mode 100644
index 0000000..ea272f5
--- /dev/null
+++ b/components/metrics/field_trials_provider.cc
@@ -0,0 +1,65 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/field_trials_provider.h"
+
+#include "base/strings/string_piece.h"
+#include "components/variations/active_field_trials.h"
+#include "components/variations/synthetic_trial_registry.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace variations {
+
+namespace {
+
+void WriteFieldTrials(const std::vector<ActiveGroupId>& field_trial_ids,
+ metrics::SystemProfileProto* system_profile) {
+ for (const ActiveGroupId& id : field_trial_ids) {
+ metrics::SystemProfileProto::FieldTrial* field_trial =
+ system_profile->add_field_trial();
+ field_trial->set_name_id(id.name);
+ field_trial->set_group_id(id.group);
+ }
+}
+
+} // namespace
+
+FieldTrialsProvider::FieldTrialsProvider(SyntheticTrialRegistry* registry,
+ base::StringPiece suffix)
+ : registry_(registry), suffix_(suffix) {}
+FieldTrialsProvider::~FieldTrialsProvider() = default;
+
+void FieldTrialsProvider::GetFieldTrialIds(
+ std::vector<ActiveGroupId>* field_trial_ids) const {
+  // The suffix supplied at construction time (typically empty) is applied to
+  // the trial names before they are hashed.
+ variations::GetFieldTrialActiveGroupIds(suffix_, field_trial_ids);
+}
+
+void FieldTrialsProvider::OnDidCreateMetricsLog() {
+ if (registry_) {
+ creation_times_.push_back(base::TimeTicks::Now());
+ }
+}
+
+void FieldTrialsProvider::ProvideSystemProfileMetrics(
+ metrics::SystemProfileProto* system_profile_proto) {
+ std::vector<ActiveGroupId> field_trial_ids;
+ GetFieldTrialIds(&field_trial_ids);
+ WriteFieldTrials(field_trial_ids, system_profile_proto);
+
+ if (registry_) {
+ base::TimeTicks creation_time;
+    // |creation_times_| should never be empty here, but don't crash even if
+    // there is a bug.
+ if (!creation_times_.empty()) {
+ creation_time = creation_times_.back();
+ creation_times_.pop_back();
+ }
+ std::vector<ActiveGroupId> synthetic_trials;
+ registry_->GetSyntheticFieldTrialsOlderThan(creation_time,
+ &synthetic_trials);
+ WriteFieldTrials(synthetic_trials, system_profile_proto);
+ }
+}
+
+} // namespace variations
diff --git a/components/metrics/field_trials_provider.h b/components/metrics/field_trials_provider.h
new file mode 100644
index 0000000..d93988e
--- /dev/null
+++ b/components/metrics/field_trials_provider.h
@@ -0,0 +1,55 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_FIELD_TRIALS_PROVIDER_H_
+#define COMPONENTS_METRICS_FIELD_TRIALS_PROVIDER_H_
+
+#include <vector>
+
+#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_provider.h"
+
+// TODO(crbug/507665): Once MetricsProvider/SystemProfileProto are moved into
+// //services/metrics, then //components/variations can depend on them, and
+// this should be moved there.
+namespace variations {
+
+class SyntheticTrialRegistry;
+struct ActiveGroupId;
+
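+// Provides the IDs of active field trials and registered synthetic trials to
+// the system profile.
+//
+// Illustrative usage sketch (based on the unit test; names are placeholders):
+//
+//   FieldTrialsProvider provider(&synthetic_trial_registry,
+//                                base::StringPiece());
+//   provider.OnDidCreateMetricsLog();
+//   provider.ProvideSystemProfileMetrics(&system_profile_proto);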
+class FieldTrialsProvider : public metrics::MetricsProvider {
+ public:
+ // |registry| must outlive this metrics provider.
+ FieldTrialsProvider(SyntheticTrialRegistry* registry,
+ base::StringPiece suffix);
+ ~FieldTrialsProvider() override;
+
+ // metrics::MetricsProvider:
+ void OnDidCreateMetricsLog() override;
+ void ProvideSystemProfileMetrics(
+ metrics::SystemProfileProto* system_profile_proto) override;
+
+ private:
+ // Overrideable for testing.
+ virtual void GetFieldTrialIds(
+ std::vector<ActiveGroupId>* field_trial_ids) const;
+
+ SyntheticTrialRegistry* registry_;
+
+ // Suffix used for the field trial names before they are hashed for uploads.
+ std::string suffix_;
+
+ // A stack of log creation times.
+ // While the initial metrics log exists, there will be two logs open.
+ // Use a stack so that we use the right creation time for the first ongoing
+ // log.
+ // TODO(crbug/746098): Simplify InitialMetricsLog logic so this is not
+ // necessary.
+ std::vector<base::TimeTicks> creation_times_;
+};
+
+} // namespace variations
+
+#endif // COMPONENTS_METRICS_FIELD_TRIALS_PROVIDER_H_
diff --git a/components/metrics/field_trials_provider_unittest.cc b/components/metrics/field_trials_provider_unittest.cc
new file mode 100644
index 0000000..2f6c8ef
--- /dev/null
+++ b/components/metrics/field_trials_provider_unittest.cc
@@ -0,0 +1,103 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/field_trials_provider.h"
+
+#include "components/variations/active_field_trials.h"
+#include "components/variations/synthetic_trial_registry.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace variations {
+
+namespace {
+
+const ActiveGroupId kFieldTrialIds[] = {{37, 43}, {13, 47}, {23, 17}};
+const ActiveGroupId kSyntheticTrials[] = {{55, 15}, {66, 16}};
+
+class TestProvider : public FieldTrialsProvider {
+ public:
+ TestProvider(SyntheticTrialRegistry* registry, base::StringPiece suffix)
+ : FieldTrialsProvider(registry, suffix) {}
+ ~TestProvider() override {}
+
+ void GetFieldTrialIds(
+ std::vector<ActiveGroupId>* field_trial_ids) const override {
+ ASSERT_TRUE(field_trial_ids->empty());
+ for (const ActiveGroupId& id : kFieldTrialIds) {
+ field_trial_ids->push_back(id);
+ }
+ }
+};
+
+// Check that the values in |system_profile| correspond to the test data
+// defined at the top of this file.
+void CheckSystemProfile(const metrics::SystemProfileProto& system_profile) {
+ ASSERT_EQ(arraysize(kFieldTrialIds) + arraysize(kSyntheticTrials),
+ static_cast<size_t>(system_profile.field_trial_size()));
+ for (size_t i = 0; i < arraysize(kFieldTrialIds); ++i) {
+ const metrics::SystemProfileProto::FieldTrial& field_trial =
+ system_profile.field_trial(i);
+ EXPECT_EQ(kFieldTrialIds[i].name, field_trial.name_id());
+ EXPECT_EQ(kFieldTrialIds[i].group, field_trial.group_id());
+ }
+ // Verify the right data is present for the synthetic trials.
+ for (size_t i = 0; i < arraysize(kSyntheticTrials); ++i) {
+ const metrics::SystemProfileProto::FieldTrial& field_trial =
+ system_profile.field_trial(i + arraysize(kFieldTrialIds));
+ EXPECT_EQ(kSyntheticTrials[i].name, field_trial.name_id());
+ EXPECT_EQ(kSyntheticTrials[i].group, field_trial.group_id());
+ }
+}
+
+} // namespace
+
+class FieldTrialsProviderTest : public ::testing::Test {
+ public:
+ FieldTrialsProviderTest() {}
+ ~FieldTrialsProviderTest() override {}
+
+ protected:
+ // Register trials which should get recorded.
+ void RegisterExpectedSyntheticTrials() {
+ for (const ActiveGroupId& id : kSyntheticTrials) {
+ registry_.RegisterSyntheticFieldTrial(
+ SyntheticTrialGroup(id.name, id.group));
+ }
+  }
+
+  // Register a trial which shouldn't get recorded.
+ void RegisterExtraSyntheticTrial() {
+ registry_.RegisterSyntheticFieldTrial(SyntheticTrialGroup(100, 1000));
+ }
+
+ // Waits until base::TimeTicks::Now() no longer equals |value|. This should
+ // take between 1-15ms per the documented resolution of base::TimeTicks.
+ void WaitUntilTimeChanges(const base::TimeTicks& value) {
+ while (base::TimeTicks::Now() == value) {
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+ }
+ }
+
+ SyntheticTrialRegistry registry_;
+};
+
+TEST_F(FieldTrialsProviderTest, ProvideSyntheticTrials) {
+  TestProvider provider(&registry_, base::StringPiece());
+
+ RegisterExpectedSyntheticTrials();
+ // Make sure these trials are older than the log.
+ WaitUntilTimeChanges(base::TimeTicks::Now());
+
+ provider.OnDidCreateMetricsLog();
+ // Make sure that the log is older than the trials that should be excluded.
+ WaitUntilTimeChanges(base::TimeTicks::Now());
+
+ RegisterExtraSyntheticTrial();
+
+ metrics::SystemProfileProto proto;
+ provider.ProvideSystemProfileMetrics(&proto);
+ CheckSystemProfile(proto);
+}
+
+} // namespace variations
diff --git a/components/metrics/file_metrics_provider.cc b/components/metrics/file_metrics_provider.cc
new file mode 100644
index 0000000..a94bb07
--- /dev/null
+++ b/components/metrics/file_metrics_provider.cc
@@ -0,0 +1,837 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/file_metrics_provider.h"
+
+#include <memory>
+
+#include "base/command_line.h"
+#include "base/containers/flat_map.h"
+#include "base/files/file.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
+#include "base/task/post_task.h"
+#include "base/task/task_traits.h"
+#include "base/task_runner.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_service.h"
+#include "components/metrics/persistent_system_profile.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+
+namespace metrics {
+
+namespace {
+
+// This structure provides the values used to define how files are opened and
+// accessed. It obviates the need for multiple code-paths within several of
+// the methods.
+struct SourceOptions {
+ // The flags to be used to open a file on disk.
+ int file_open_flags;
+
+ // The access mode to be used when mapping a file into memory.
+ base::MemoryMappedFile::Access memory_mapped_access;
+
+ // Indicates if the file is to be accessed read-only.
+ bool is_read_only;
+};
+
+enum : int {
+ // Opening a file typically requires at least these flags.
+ STD_OPEN = base::File::FLAG_OPEN | base::File::FLAG_READ,
+};
+
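+// Note: the entries below are indexed by SourceType; RegisterSource() DCHECKs
+// that every registered type has a corresponding entry here.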
+constexpr SourceOptions kSourceOptions[] = {
+ // SOURCE_HISTOGRAMS_ATOMIC_FILE
+ {
+ // Ensure that no other process reads this at the same time.
+ STD_OPEN | base::File::FLAG_EXCLUSIVE_READ,
+ base::MemoryMappedFile::READ_ONLY,
+ true
+ },
+ // SOURCE_HISTOGRAMS_ATOMIC_DIR
+ {
+ // Ensure that no other process reads this at the same time.
+ STD_OPEN | base::File::FLAG_EXCLUSIVE_READ,
+ base::MemoryMappedFile::READ_ONLY,
+ true
+ },
+ // SOURCE_HISTOGRAMS_ACTIVE_FILE
+ {
+ // Allow writing (updated "logged" values) to the file.
+ STD_OPEN | base::File::FLAG_WRITE,
+ base::MemoryMappedFile::READ_WRITE,
+ false
+ }
+};
+
+enum EmbeddedProfileResult : int {
+ EMBEDDED_PROFILE_ATTEMPT,
+ EMBEDDED_PROFILE_FOUND,
+ EMBEDDED_PROFILE_FALLBACK,
+ EMBEDDED_PROFILE_DROPPED,
+ EMBEDDED_PROFILE_ACTION_MAX
+};
+
+void RecordEmbeddedProfileResult(EmbeddedProfileResult result) {
+ UMA_HISTOGRAM_ENUMERATION("UMA.FileMetricsProvider.EmbeddedProfileResult",
+ result, EMBEDDED_PROFILE_ACTION_MAX);
+}
+
+void DeleteFileWhenPossible(const base::FilePath& path) {
+ // Open (with delete) and then immediately close the file by going out of
+ // scope. This is the only cross-platform safe way to delete a file that may
+ // be open elsewhere, a distinct possibility given the asynchronous nature
+ // of the delete task.
+ base::File file(path, base::File::FLAG_OPEN | base::File::FLAG_READ |
+ base::File::FLAG_DELETE_ON_CLOSE);
+}
+
+// A task runner to use for testing.
+base::TaskRunner* g_task_runner_for_testing = nullptr;
+
+// Returns a task runner appropriate for running background tasks that perform
+// file I/O.
+scoped_refptr<base::TaskRunner> CreateBackgroundTaskRunner() {
+ if (g_task_runner_for_testing)
+ return scoped_refptr<base::TaskRunner>(g_task_runner_for_testing);
+
+ return base::CreateTaskRunnerWithTraits(
+ {base::MayBlock(), base::TaskPriority::BEST_EFFORT,
+ base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN});
+}
+
+} // namespace
+
+// This structure stores all the information about the sources being monitored
+// and their current reporting state.
+struct FileMetricsProvider::SourceInfo {
+ SourceInfo(const Params& params)
+ : type(params.type),
+ association(params.association),
+ prefs_key(params.prefs_key),
+ filter(params.filter),
+ max_age(params.max_age),
+ max_dir_kib(params.max_dir_kib),
+ max_dir_files(params.max_dir_files) {
+ switch (type) {
+ case SOURCE_HISTOGRAMS_ACTIVE_FILE:
+ DCHECK(prefs_key.empty());
+ FALLTHROUGH;
+ case SOURCE_HISTOGRAMS_ATOMIC_FILE:
+ path = params.path;
+ break;
+ case SOURCE_HISTOGRAMS_ATOMIC_DIR:
+ directory = params.path;
+ break;
+ }
+ }
+ ~SourceInfo() {}
+
+ struct FoundFile {
+ base::FilePath path;
+ base::FileEnumerator::FileInfo info;
+ };
+ using FoundFiles = base::flat_map<base::Time, FoundFile>;
+
+ // How to access this source (file/dir, atomic/active).
+ const SourceType type;
+
+  // The run with which this source is associated.
+ const SourceAssociation association;
+
+ // Where on disk the directory is located. This will only be populated when
+ // a directory is being monitored.
+ base::FilePath directory;
+
+ // The files found in the above directory, ordered by last-modified.
+ std::unique_ptr<FoundFiles> found_files;
+
+ // Where on disk the file is located. If a directory is being monitored,
+ // this will be updated for whatever file is being read.
+ base::FilePath path;
+
+  // Name used inside prefs to persist metadata.
+ std::string prefs_key;
+
+ // The filter callback for determining what to do with found files.
+ FilterCallback filter;
+
+ // The maximum allowed age of a file.
+ base::TimeDelta max_age;
+
+ // The maximum allowed bytes in a directory.
+ size_t max_dir_kib;
+
+ // The maximum allowed files in a directory.
+ size_t max_dir_files;
+
+ // The last-seen time of this source to detect change.
+ base::Time last_seen;
+
+ // Indicates if the data has been read out or not.
+ bool read_complete = false;
+
+ // Once a file has been recognized as needing to be read, it is mapped
+ // into memory and assigned to an |allocator| object.
+ std::unique_ptr<base::PersistentHistogramAllocator> allocator;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SourceInfo);
+};
+
+FileMetricsProvider::Params::Params(const base::FilePath& path,
+ SourceType type,
+ SourceAssociation association,
+ base::StringPiece prefs_key)
+ : path(path), type(type), association(association), prefs_key(prefs_key) {}
+
+FileMetricsProvider::Params::~Params() {}
+
+FileMetricsProvider::FileMetricsProvider(PrefService* local_state)
+ : task_runner_(CreateBackgroundTaskRunner()),
+ pref_service_(local_state),
+ weak_factory_(this) {
+ base::StatisticsRecorder::RegisterHistogramProvider(
+ weak_factory_.GetWeakPtr());
+}
+
+FileMetricsProvider::~FileMetricsProvider() {}
+
+void FileMetricsProvider::RegisterSource(const Params& params) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // Ensure that kSourceOptions has been filled for this type.
+ DCHECK_GT(arraysize(kSourceOptions), static_cast<size_t>(params.type));
+
+ std::unique_ptr<SourceInfo> source(new SourceInfo(params));
+
+ // |prefs_key| may be empty if the caller does not wish to persist the
+ // state across instances of the program.
+ if (pref_service_ && !params.prefs_key.empty()) {
+ source->last_seen = base::Time::FromInternalValue(
+ pref_service_->GetInt64(metrics::prefs::kMetricsLastSeenPrefix +
+ source->prefs_key));
+ }
+
+ switch (params.association) {
+ case ASSOCIATE_CURRENT_RUN:
+ case ASSOCIATE_INTERNAL_PROFILE:
+ sources_to_check_.push_back(std::move(source));
+ break;
+ case ASSOCIATE_PREVIOUS_RUN:
+ case ASSOCIATE_INTERNAL_PROFILE_OR_PREVIOUS_RUN:
+ DCHECK_EQ(SOURCE_HISTOGRAMS_ATOMIC_FILE, source->type);
+ sources_for_previous_run_.push_back(std::move(source));
+ break;
+ }
+}
+
+// static
+void FileMetricsProvider::RegisterPrefs(PrefRegistrySimple* prefs,
+ const base::StringPiece prefs_key) {
+ prefs->RegisterInt64Pref(metrics::prefs::kMetricsLastSeenPrefix +
+ prefs_key.as_string(), 0);
+}
+
+// static
+void FileMetricsProvider::SetTaskRunnerForTesting(
+ const scoped_refptr<base::TaskRunner>& task_runner) {
+ DCHECK(!g_task_runner_for_testing || !task_runner);
+ g_task_runner_for_testing = task_runner.get();
+}
+
+// static
+void FileMetricsProvider::RecordAccessResult(AccessResult result) {
+ UMA_HISTOGRAM_ENUMERATION("UMA.FileMetricsProvider.AccessResult", result,
+ ACCESS_RESULT_MAX);
+}
+
+// static
+bool FileMetricsProvider::LocateNextFileInDirectory(SourceInfo* source) {
+ DCHECK_EQ(SOURCE_HISTOGRAMS_ATOMIC_DIR, source->type);
+ DCHECK(!source->directory.empty());
+
+  // Cumulative directory stats. These remain zero if the directory isn't
+  // scanned, which is fine: any work they would trigger below was already
+  // done during the first call, when the directory was fully scanned.
+ size_t total_size_kib = 0; // Using KiB allows 4TiB even on 32-bit builds.
+ size_t file_count = 0;
+
+ base::Time now_time = base::Time::Now();
+ if (!source->found_files) {
+ source->found_files = std::make_unique<SourceInfo::FoundFiles>();
+ base::FileEnumerator file_iter(source->directory, /*recursive=*/false,
+ base::FileEnumerator::FILES);
+ SourceInfo::FoundFile found_file;
+
+ // Open the directory and find all the files, remembering the last-modified
+ // time of each.
+ for (found_file.path = file_iter.Next(); !found_file.path.empty();
+ found_file.path = file_iter.Next()) {
+ found_file.info = file_iter.GetInfo();
+
+ // Ignore directories.
+ if (found_file.info.IsDirectory())
+ continue;
+
+ // Ignore temporary files.
+ base::FilePath::CharType first_character =
+ found_file.path.BaseName().value().front();
+ if (first_character == FILE_PATH_LITERAL('.') ||
+ first_character == FILE_PATH_LITERAL('_')) {
+ continue;
+ }
+
+ // Ignore non-PMA (Persistent Memory Allocator) files.
+ if (found_file.path.Extension() !=
+ base::PersistentMemoryAllocator::kFileExtension) {
+ continue;
+ }
+
+ // Process real files.
+ total_size_kib += found_file.info.GetSize() >> 10;
+ base::Time modified = found_file.info.GetLastModifiedTime();
+ if (modified > source->last_seen) {
+ // This file hasn't been read. Remember it (unless from the future).
+ if (modified <= now_time)
+ source->found_files->emplace(modified, std::move(found_file));
+ ++file_count;
+ } else {
+ // This file has been read. Try to delete it. Ignore any errors because
+ // the file may not be removable by this process. It could, for example,
+ // have been created by a privileged process like setup.exe. Even if it
+ // is not removed, it will continue to be ignored because of the older
+ // modification time.
+ base::DeleteFile(found_file.path, /*recursive=*/false);
+ }
+ }
+ }
+
+ // Filter files from the front until one is found for processing.
+ bool have_file = false;
+ while (!source->found_files->empty()) {
+ SourceInfo::FoundFile found =
+ std::move(source->found_files->begin()->second);
+ source->found_files->erase(source->found_files->begin());
+
+ bool too_many =
+ source->max_dir_files > 0 && file_count > source->max_dir_files;
+ bool too_big =
+ source->max_dir_kib > 0 && total_size_kib > source->max_dir_kib;
+ bool too_old =
+ source->max_age != base::TimeDelta() &&
+ now_time - found.info.GetLastModifiedTime() > source->max_age;
+ if (too_many || too_big || too_old) {
+ base::DeleteFile(found.path, /*recursive=*/false);
+ --file_count;
+ total_size_kib -= found.info.GetSize() >> 10;
+ RecordAccessResult(too_many ? ACCESS_RESULT_TOO_MANY_FILES
+ : too_big ? ACCESS_RESULT_TOO_MANY_BYTES
+ : ACCESS_RESULT_TOO_OLD);
+ continue;
+ }
+
+ AccessResult result = HandleFilterSource(source, found.path);
+ if (result == ACCESS_RESULT_SUCCESS) {
+ source->path = std::move(found.path);
+ have_file = true;
+ break;
+ }
+
+ // Record the result. Success will be recorded by the caller.
+ if (result != ACCESS_RESULT_THIS_PID)
+ RecordAccessResult(result);
+ }
+
+ return have_file;
+}
+
+// static
+void FileMetricsProvider::FinishedWithSource(SourceInfo* source,
+ AccessResult result) {
+ // Different source types require different post-processing.
+ switch (source->type) {
+ case SOURCE_HISTOGRAMS_ATOMIC_FILE:
+ case SOURCE_HISTOGRAMS_ATOMIC_DIR:
+ // Done with this file so delete the allocator and its owned file.
+ source->allocator.reset();
+ // Remove the file if it has been recorded. This prevents files from
+ // accumulating or also being recorded by different instances of
+ // the browser.
+ if (result == ACCESS_RESULT_SUCCESS ||
+ result == ACCESS_RESULT_NOT_MODIFIED ||
+ result == ACCESS_RESULT_MEMORY_DELETED ||
+ result == ACCESS_RESULT_TOO_OLD) {
+ DeleteFileWhenPossible(source->path);
+ }
+ break;
+ case SOURCE_HISTOGRAMS_ACTIVE_FILE:
+ // Keep the allocator open so it doesn't have to be re-mapped each
+ // time. This also allows the contents to be merged on-demand.
+ break;
+ }
+}
+
+// static
+void FileMetricsProvider::CheckAndMergeMetricSourcesOnTaskRunner(
+ SourceInfoList* sources) {
+ // This method has all state information passed in |sources| and is intended
+ // to run on a worker thread rather than the UI thread.
+ for (std::unique_ptr<SourceInfo>& source : *sources) {
+ AccessResult result;
+ do {
+ result = CheckAndMapMetricSource(source.get());
+
+ // Some results are not reported in order to keep the dashboard clean.
+ if (result != ACCESS_RESULT_DOESNT_EXIST &&
+ result != ACCESS_RESULT_NOT_MODIFIED &&
+ result != ACCESS_RESULT_THIS_PID) {
+ RecordAccessResult(result);
+ }
+
+ // If there are no files (or no more files) in this source, stop now.
+ if (result == ACCESS_RESULT_DOESNT_EXIST)
+ break;
+
+ // Mapping was successful. Merge it.
+ if (result == ACCESS_RESULT_SUCCESS) {
+ // Metrics associated with internal profiles have to be fetched directly
+ // so just keep the mapping for use by the main thread.
+ if (source->association == ASSOCIATE_INTERNAL_PROFILE)
+ break;
+
+ MergeHistogramDeltasFromSource(source.get());
+ DCHECK(source->read_complete);
+ }
+
+ // All done with this source.
+ FinishedWithSource(source.get(), result);
+
+ // If it's a directory, keep trying until a file is successfully opened.
+ // When there are no more files, ACCESS_RESULT_DOESNT_EXIST will be
+ // returned and the loop will exit above.
+ } while (result != ACCESS_RESULT_SUCCESS && !source->directory.empty());
+
+ // If the set of known files is empty, clear the object so the next run
+ // will do a fresh scan of the directory.
+ if (source->found_files && source->found_files->empty())
+ source->found_files.reset();
+ }
+}
+
+// This method has all state information passed in |source| and is intended
+// to run on a worker thread rather than the UI thread.
+// static
+FileMetricsProvider::AccessResult FileMetricsProvider::CheckAndMapMetricSource(
+ SourceInfo* source) {
+ // If source was read, clean up after it.
+ if (source->read_complete)
+ FinishedWithSource(source, ACCESS_RESULT_SUCCESS);
+ source->read_complete = false;
+ DCHECK(!source->allocator);
+
+ // If the source is a directory, look for files within it.
+ if (!source->directory.empty() && !LocateNextFileInDirectory(source))
+ return ACCESS_RESULT_DOESNT_EXIST;
+
+ // Do basic validation on the file metadata.
+ base::File::Info info;
+ if (!base::GetFileInfo(source->path, &info))
+ return ACCESS_RESULT_DOESNT_EXIST;
+
+ if (info.is_directory || info.size == 0)
+ return ACCESS_RESULT_INVALID_FILE;
+
+ if (source->last_seen >= info.last_modified)
+ return ACCESS_RESULT_NOT_MODIFIED;
+ if (source->max_age != base::TimeDelta() &&
+ base::Time::Now() - info.last_modified > source->max_age) {
+ return ACCESS_RESULT_TOO_OLD;
+ }
+
+ // Non-directory files still need to be filtered.
+ if (source->directory.empty()) {
+ AccessResult result = HandleFilterSource(source, source->path);
+ if (result != ACCESS_RESULT_SUCCESS)
+ return result;
+ }
+
+ // A new file of metrics has been found.
+ base::File file(source->path, kSourceOptions[source->type].file_open_flags);
+ if (!file.IsValid())
+ return ACCESS_RESULT_NO_OPEN;
+
+ std::unique_ptr<base::MemoryMappedFile> mapped(new base::MemoryMappedFile());
+ if (!mapped->Initialize(std::move(file),
+ kSourceOptions[source->type].memory_mapped_access)) {
+ return ACCESS_RESULT_SYSTEM_MAP_FAILURE;
+ }
+
+ // Ensure any problems below don't occur repeatedly.
+ source->last_seen = info.last_modified;
+
+ // Test the validity of the file contents.
+ const bool read_only = kSourceOptions[source->type].is_read_only;
+ if (!base::FilePersistentMemoryAllocator::IsFileAcceptable(*mapped,
+ read_only)) {
+ return ACCESS_RESULT_INVALID_CONTENTS;
+ }
+
+ // Map the file and validate it.
+ std::unique_ptr<base::PersistentMemoryAllocator> memory_allocator =
+ std::make_unique<base::FilePersistentMemoryAllocator>(
+ std::move(mapped), 0, 0, base::StringPiece(), read_only);
+ if (memory_allocator->GetMemoryState() ==
+ base::PersistentMemoryAllocator::MEMORY_DELETED) {
+ return ACCESS_RESULT_MEMORY_DELETED;
+ }
+ if (memory_allocator->IsCorrupt())
+ return ACCESS_RESULT_DATA_CORRUPTION;
+
+ // Create an allocator for the mapped file. Ownership passes to the allocator.
+ source->allocator = std::make_unique<base::PersistentHistogramAllocator>(
+ std::move(memory_allocator));
+
+ // Check that an "independent" file has the necessary information present.
+ if (source->association == ASSOCIATE_INTERNAL_PROFILE &&
+ !PersistentSystemProfile::GetSystemProfile(
+ *source->allocator->memory_allocator(), nullptr)) {
+ return ACCESS_RESULT_NO_PROFILE;
+ }
+
+ return ACCESS_RESULT_SUCCESS;
+}
+
+// static
+void FileMetricsProvider::MergeHistogramDeltasFromSource(SourceInfo* source) {
+ DCHECK(source->allocator);
+ SCOPED_UMA_HISTOGRAM_TIMER("UMA.FileMetricsProvider.SnapshotTime.File");
+ base::PersistentHistogramAllocator::Iterator histogram_iter(
+ source->allocator.get());
+
+ const bool read_only = kSourceOptions[source->type].is_read_only;
+ int histogram_count = 0;
+ while (true) {
+ std::unique_ptr<base::HistogramBase> histogram = histogram_iter.GetNext();
+ if (!histogram)
+ break;
+ if (read_only) {
+ source->allocator->MergeHistogramFinalDeltaToStatisticsRecorder(
+ histogram.get());
+ } else {
+ source->allocator->MergeHistogramDeltaToStatisticsRecorder(
+ histogram.get());
+ }
+ ++histogram_count;
+ }
+
+ source->read_complete = true;
+ DVLOG(1) << "Reported " << histogram_count << " histograms from "
+ << source->path.value();
+}
+
+// static
+void FileMetricsProvider::RecordHistogramSnapshotsFromSource(
+ base::HistogramSnapshotManager* snapshot_manager,
+ SourceInfo* source) {
+ DCHECK_NE(SOURCE_HISTOGRAMS_ACTIVE_FILE, source->type);
+
+ base::PersistentHistogramAllocator::Iterator histogram_iter(
+ source->allocator.get());
+
+ int histogram_count = 0;
+ while (true) {
+ std::unique_ptr<base::HistogramBase> histogram = histogram_iter.GetNext();
+ if (!histogram)
+ break;
+ snapshot_manager->PrepareFinalDelta(histogram.get());
+ ++histogram_count;
+ }
+
+ source->read_complete = true;
+ DVLOG(1) << "Reported " << histogram_count << " histograms from "
+ << source->path.value();
+}
+
+FileMetricsProvider::AccessResult FileMetricsProvider::HandleFilterSource(
+ SourceInfo* source,
+ const base::FilePath& path) {
+ if (!source->filter)
+ return ACCESS_RESULT_SUCCESS;
+
+ // Alternatively, pass a Params object to the filter, like the one originally
+ // used to configure the source.
+ // Params params(path, source->type, source->association, source->prefs_key);
+ FilterAction action = source->filter.Run(path);
+ switch (action) {
+ case FILTER_PROCESS_FILE:
+ // Process the file.
+ return ACCESS_RESULT_SUCCESS;
+
+ case FILTER_ACTIVE_THIS_PID:
+ // Even the file for the current process has to be touched, or its stamp
+ // will be less than "last processed" and thus skipped on future runs.
+ // That applies even to runs by new instances of the browser when a pref
+ // key is provided, because the last-uploaded stamp is persisted.
+ case FILTER_TRY_LATER: {
+ // Touch the file with the current timestamp making it (presumably) the
+ // newest file in the directory.
+ base::Time now = base::Time::Now();
+ base::TouchFile(path, /*accessed=*/now, /*modified=*/now);
+ if (action == FILTER_ACTIVE_THIS_PID)
+ return ACCESS_RESULT_THIS_PID;
+ return ACCESS_RESULT_FILTER_TRY_LATER;
+ }
+
+ case FILTER_SKIP_FILE:
+ switch (source->type) {
+ case SOURCE_HISTOGRAMS_ATOMIC_FILE:
+ case SOURCE_HISTOGRAMS_ATOMIC_DIR:
+ // Only "atomic" files are deleted (best-effort).
+ DeleteFileWhenPossible(path);
+ break;
+ case SOURCE_HISTOGRAMS_ACTIVE_FILE:
+ // File will presumably get modified elsewhere and thus tried again.
+ break;
+ }
+ return ACCESS_RESULT_FILTER_SKIP_FILE;
+ }
+
+ // Code never gets here but some compilers don't realize that and so complain
+ // that "not all control paths return a value".
+ NOTREACHED();
+ return ACCESS_RESULT_SUCCESS;
+}
+
+void FileMetricsProvider::ScheduleSourcesCheck() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (sources_to_check_.empty())
+ return;
+
+ // Create an independent list of sources for checking. This will be Owned()
+ // by the reply call given to the task-runner, to be deleted when that call
+ // has returned. It is also passed Unretained() to the task itself, safe
+ // because that must complete before the reply runs.
+ SourceInfoList* check_list = new SourceInfoList();
+ std::swap(sources_to_check_, *check_list);
+ task_runner_->PostTaskAndReply(
+ FROM_HERE,
+ base::BindOnce(
+ &FileMetricsProvider::CheckAndMergeMetricSourcesOnTaskRunner,
+ base::Unretained(check_list)),
+ base::BindOnce(&FileMetricsProvider::RecordSourcesChecked,
+ weak_factory_.GetWeakPtr(), base::Owned(check_list)));
+}
+
+void FileMetricsProvider::RecordSourcesChecked(SourceInfoList* checked) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // Sources that still have an allocator at this point are read/write "active"
+ // files that may need their contents merged on-demand. If there is no
+ // allocator (not a read/write file) but a read was done on the task-runner,
+ // try again immediately to see if more is available (in a directory of
+ // files). Otherwise, remember the source for checking again at a later time.
+ bool did_read = false;
+ for (auto iter = checked->begin(); iter != checked->end();) {
+ auto temp = iter++;
+ SourceInfo* source = temp->get();
+ if (source->read_complete) {
+ RecordSourceAsRead(source);
+ did_read = true;
+ }
+ if (source->allocator) {
+ if (source->association == ASSOCIATE_INTERNAL_PROFILE) {
+ sources_with_profile_.splice(sources_with_profile_.end(), *checked,
+ temp);
+ } else {
+ sources_mapped_.splice(sources_mapped_.end(), *checked, temp);
+ }
+ } else {
+ sources_to_check_.splice(sources_to_check_.end(), *checked, temp);
+ }
+ }
+
+ // If a read was done, schedule another one immediately. In the case of a
+ // directory of files, this ensures that all entries get processed. It's
+ // done here instead of as a loop in CheckAndMergeMetricSourcesOnTaskRunner
+ // so that (a) it gives the disk a rest and (b) testing of individual reads
+ // is possible.
+ if (did_read)
+ ScheduleSourcesCheck();
+}
+
+void FileMetricsProvider::DeleteFileAsync(const base::FilePath& path) {
+ task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(DeleteFileWhenPossible, path));
+}
+
+void FileMetricsProvider::RecordSourceAsRead(SourceInfo* source) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // Persistently record the "last seen" timestamp of the source file to
+ // ensure that the file is never read again unless it is modified again.
+ if (pref_service_ && !source->prefs_key.empty()) {
+ pref_service_->SetInt64(
+ metrics::prefs::kMetricsLastSeenPrefix + source->prefs_key,
+ source->last_seen.ToInternalValue());
+ }
+}
+
+void FileMetricsProvider::OnDidCreateMetricsLog() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // Schedule a check to see if there are new metrics to load. If so, they will
+ // be reported during the next collection run after this one. The check is run
+ // off of a MayBlock() TaskRunner so as to not cause delays on the main UI
+ // thread (which is currently where metric collection is done).
+ ScheduleSourcesCheck();
+
+ // Clear any data for initial metrics since they're always reported
+ // before the first call to this method. It couldn't be released after
+ // being reported in RecordInitialHistogramSnapshots because the data
+ // will continue to be used by the caller after that method returns. Once
+ // here, though, all actions to be done on the data have been completed.
+ for (const std::unique_ptr<SourceInfo>& source : sources_for_previous_run_)
+ DeleteFileAsync(source->path);
+ sources_for_previous_run_.clear();
+}
+
+bool FileMetricsProvider::ProvideIndependentMetrics(
+ SystemProfileProto* system_profile_proto,
+ base::HistogramSnapshotManager* snapshot_manager) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ while (!sources_with_profile_.empty()) {
+ SourceInfo* source = sources_with_profile_.begin()->get();
+ DCHECK(source->allocator);
+
+ bool success = false;
+ RecordEmbeddedProfileResult(EMBEDDED_PROFILE_ATTEMPT);
+ if (PersistentSystemProfile::GetSystemProfile(
+ *source->allocator->memory_allocator(), system_profile_proto)) {
+ RecordHistogramSnapshotsFromSource(snapshot_manager, source);
+ success = true;
+ RecordEmbeddedProfileResult(EMBEDDED_PROFILE_FOUND);
+ } else {
+ RecordEmbeddedProfileResult(EMBEDDED_PROFILE_DROPPED);
+
+ // TODO(bcwhite): Remove these once crbug/695880 is resolved.
+
+ int histogram_count = 0;
+ base::PersistentHistogramAllocator::Iterator histogram_iter(
+ source->allocator.get());
+ while (histogram_iter.GetNext()) {
+ ++histogram_count;
+ }
+ UMA_HISTOGRAM_COUNTS_10000(
+ "UMA.FileMetricsProvider.EmbeddedProfile.DroppedHistogramCount",
+ histogram_count);
+ }
+
+ // Regardless of whether this source was successfully recorded, it is never
+ // read again.
+ source->read_complete = true;
+ RecordSourceAsRead(source);
+ sources_to_check_.splice(sources_to_check_.end(), sources_with_profile_,
+ sources_with_profile_.begin());
+ ScheduleSourcesCheck();
+
+ if (success)
+ return true;
+ }
+
+ return false;
+}
+
+bool FileMetricsProvider::HasPreviousSessionData() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ // Check all sources for previous run to see if they need to be read.
+ for (auto iter = sources_for_previous_run_.begin();
+ iter != sources_for_previous_run_.end();) {
+ SCOPED_UMA_HISTOGRAM_TIMER("UMA.FileMetricsProvider.InitialCheckTime.File");
+
+ auto temp = iter++;
+ SourceInfo* source = temp->get();
+
+ // This would normally be done on a background I/O thread but there
+ // hasn't been a chance to run any at the time this method is called.
+ // Do the check in-line.
+ AccessResult result = CheckAndMapMetricSource(source);
+ UMA_HISTOGRAM_ENUMERATION("UMA.FileMetricsProvider.InitialAccessResult",
+ result, ACCESS_RESULT_MAX);
+
+ // If it couldn't be accessed, remove it from the list. There is only ever
+ // one chance to record it so no point keeping it around for later. Also
+ // mark it as having been read since uploading it with a future browser
+ // run would associate it with the then-previous run which would no longer
+ // be the run from which it came.
+ if (result != ACCESS_RESULT_SUCCESS) {
+ DCHECK(!source->allocator);
+ RecordSourceAsRead(source);
+ DeleteFileAsync(source->path);
+ sources_for_previous_run_.erase(temp);
+ continue;
+ }
+
+ DCHECK(source->allocator);
+
+ // If the source should be associated with an existing internal profile,
+ // move it to |sources_with_profile_| for later upload.
+ if (source->association == ASSOCIATE_INTERNAL_PROFILE_OR_PREVIOUS_RUN) {
+ if (PersistentSystemProfile::HasSystemProfile(
+ *source->allocator->memory_allocator())) {
+ RecordEmbeddedProfileResult(EMBEDDED_PROFILE_ATTEMPT);
+ RecordEmbeddedProfileResult(EMBEDDED_PROFILE_FALLBACK);
+ sources_with_profile_.splice(sources_with_profile_.end(),
+ sources_for_previous_run_, temp);
+ }
+ }
+ }
+
+ return !sources_for_previous_run_.empty();
+}
+
+void FileMetricsProvider::RecordInitialHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ for (const std::unique_ptr<SourceInfo>& source : sources_for_previous_run_) {
+ SCOPED_UMA_HISTOGRAM_TIMER(
+ "UMA.FileMetricsProvider.InitialSnapshotTime.File");
+
+ // The source needs to have an allocator attached to it in order to read
+ // histograms out of it.
+ DCHECK(!source->read_complete);
+ DCHECK(source->allocator);
+
+ // Dump all histograms contained within the source to the snapshot-manager.
+ RecordHistogramSnapshotsFromSource(snapshot_manager, source.get());
+
+ // Update the last-seen time so it isn't read again unless it changes.
+ RecordSourceAsRead(source.get());
+ }
+}
+
+void FileMetricsProvider::MergeHistogramDeltas() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ for (std::unique_ptr<SourceInfo>& source : sources_mapped_) {
+ MergeHistogramDeltasFromSource(source.get());
+ }
+}
+
+} // namespace metrics
diff --git a/components/metrics/file_metrics_provider.h b/components/metrics/file_metrics_provider.h
new file mode 100644
index 0000000..7465d93
--- /dev/null
+++ b/components/metrics/file_metrics_provider.h
@@ -0,0 +1,323 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_FILE_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_FILE_METRICS_PROVIDER_H_
+
+#include <list>
+#include <memory>
+#include <string>
+
+#include "base/callback_forward.h"
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/weak_ptr.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/sequence_checker.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_provider.h"
+
+class PrefRegistrySimple;
+class PrefService;
+
+namespace base {
+class TaskRunner;
+}
+
+namespace metrics {
+
+// FileMetricsProvider gathers and logs histograms written to files on disk.
+// Any number of files can be registered and will be polled once per upload
+// cycle (at startup and periodically thereafter -- about every 30 minutes
+// for desktop) for data to send.
+class FileMetricsProvider : public MetricsProvider,
+ public base::StatisticsRecorder::HistogramProvider {
+ public:
+ struct Params;
+
+ enum SourceType {
+ // "Atomic" files are a collection of histograms that are written
+ // completely in a single atomic operation (typically a write followed
+ // by an atomic rename) and the file is never updated again except to
+ // be replaced by a completely new set of histograms. This is the only
+ // option that can be used if the file is not writeable by *this*
+ // process. Once the file has been read, an attempt will be made to
+ // delete it thus providing some measure of safety should different
+ // instantiations (such as by different users of a system-level install)
+ // try to read it. In case the delete operation fails, this class
+ // persistently tracks the last-modified time of the file so it will
+ // not be read a second time.
+ SOURCE_HISTOGRAMS_ATOMIC_FILE,
+
+ // A directory of atomic PMA files. This handles a directory in which
+ // files of metrics are atomically added. Only files ending with ".pma"
+ // will be read. They are read according to their last-modified time and
+ // never read more than once (unless they change). Only one file will
+ // be read per reporting cycle. Filenames that start with a dot (.) or
+ // an underscore (_) are ignored so temporary files (perhaps created by
+ // the ImportantFileWriter) will not get read. Files that have been
+ // read will be attempted to be deleted; should those files not be
+ // deletable by this process, it is the responsibility of the producer
+ // to keep the directory pruned in some manner. Added files must have a
+ // timestamp later (not the same or earlier) than the newest file that
+ // already exists or it may be assumed to have been already uploaded.
+ SOURCE_HISTOGRAMS_ATOMIC_DIR,
+
+ // "Active" files may be open by one or more other processes and updated
+ // at any time with new samples or new histograms. Such files may also be
+ // inactive for any period of time only to be opened again and have new
+ // data written to them. The file should probably never be deleted because
+ // there would be no guarantee that the data has been reported.
+ // TODO(bcwhite): Enable when read/write mem-mapped files are supported.
+ SOURCE_HISTOGRAMS_ACTIVE_FILE,
+ };
+
+ enum SourceAssociation {
+ // Associates the metrics in the file with the current run of the browser.
+ // The reporting will take place as part of the normal logging of
+ // histograms.
+ ASSOCIATE_CURRENT_RUN,
+
+ // Associates the metrics in the file with the previous run of the browser.
+ // The reporting will take place as part of the "stability" histograms.
+ // This is important when metrics are dumped as part of a crash of the
+ // previous run. This can only be used with SOURCE_HISTOGRAMS_ATOMIC_FILE.
+ ASSOCIATE_PREVIOUS_RUN,
+
+ // Associates the metrics in the file with a profile embedded in the
+ // same file. The reporting will take place at a convenient time after
+ // startup when the browser is otherwise idle. If there is no embedded
+ // system profile, these metrics will be lost.
+ ASSOCIATE_INTERNAL_PROFILE,
+
+ // Like above but fall back to ASSOCIATE_PREVIOUS_RUN if there is no
+ // embedded profile. This has a small cost during startup as that is
+ // when previous-run metrics are sent, so the file has to be checked at
+ // that time even though actual transfer will be delayed if an
+ // embedded profile is found.
+ ASSOCIATE_INTERNAL_PROFILE_OR_PREVIOUS_RUN,
+ };
+
+ enum FilterAction {
+ // Process this file normally.
+ FILTER_PROCESS_FILE,
+
+ // This file is the active metrics file for the current process. Don't
+ // do anything with it. This is effectively "try later" but isn't
+ // added to the results histogram because the file has to be ignored
+ // throughout the life of the browser and that skews the distribution.
+ FILTER_ACTIVE_THIS_PID,
+
+ // Try again. This could happen within milliseconds or minutes but no other
+ // files from the same source will get processed in between. The process
+ // must have permission to "touch" the file and alter its last-modified
+ // time because files are always processed in order of those stamps.
+ FILTER_TRY_LATER,
+
+ // Skip this file. This file will not be processed until it has changed
+ // (i.e. had its last-modified time updated). If it is "atomic", an
+ // attempt will be made to delete it.
+ FILTER_SKIP_FILE,
+ };
+
+ // A "filter" can be defined to determine what to do on a per-file basis.
+ // This is called only after a file has been found to be the next one to
+ // be processed so it's okay if filter calls are relatively expensive.
+ // Calls are made on a low-priority background thread that is capable of
+ // doing I/O.
+ using FilterCallback =
+ base::RepeatingCallback<FilterAction(const base::FilePath& path)>;
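+
+ // A minimal filter sketch, assuming a Params object |params| and a
+ // hypothetical GetOwnMetricsPath() helper that returns the path of this
+ // process's own active metrics file:
+ //
+ //   FileMetricsProvider::FilterAction FilterMetricsFiles(
+ //       const base::FilePath& path) {
+ //     if (path == GetOwnMetricsPath())
+ //       return FileMetricsProvider::FILTER_ACTIVE_THIS_PID;
+ //     return FileMetricsProvider::FILTER_PROCESS_FILE;
+ //   }
+ //
+ //   params.filter = base::BindRepeating(&FilterMetricsFiles);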
+
+ // Parameters for RegisterSource, defined as a structure so that new
+ // ones can be added (with default values) without requiring changes
+ // to all call sites.
+ struct Params {
+ Params(const base::FilePath& path,
+ SourceType type,
+ SourceAssociation association,
+ base::StringPiece prefs_key = base::StringPiece());
+
+ ~Params();
+
+ // The standard parameters, set during construction.
+ const base::FilePath path;
+ const SourceType type;
+ const SourceAssociation association;
+ const base::StringPiece prefs_key;
+
+ // Other parameters that can be set after construction.
+ FilterCallback filter; // Run-time check for what to do with file.
+ base::TimeDelta max_age; // Maximum age of a file (0=unlimited).
+ size_t max_dir_kib = 0; // Maximum KiB in a directory (0=inf).
+ size_t max_dir_files = 100; // Maximum files in a directory (0=inf).
+ };
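+
+ // A minimal usage sketch, mirroring the unit tests; the |provider|,
+ // directory path and prefs key here are placeholders:
+ //
+ //   FileMetricsProvider::Params params(
+ //       base::FilePath(FILE_PATH_LITERAL("metrics_dir")),
+ //       FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
+ //       FileMetricsProvider::ASSOCIATE_CURRENT_RUN, "MetricsDir");
+ //   params.max_age = base::TimeDelta::FromHours(1);
+ //   params.max_dir_files = 20;
+ //   provider->RegisterSource(params);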
+
+ explicit FileMetricsProvider(PrefService* local_state);
+ ~FileMetricsProvider() override;
+
+ // Indicates a file or directory to be monitored and how the file or files
+ // within that directory are used. Because some metadata may need to persist
+ // across process restarts, preferences entries are used based on the
+ // |prefs_key| name. Call RegisterPrefs() with the same name to create the
+ // necessary keys in advance. Set |prefs_key| empty (nullptr will work) if
+ // no persistence is required. ACTIVE files shouldn't have a pref key as
+ // they update internal state about what has been previously sent.
+ void RegisterSource(const Params& params);
+
+ // Registers all necessary preferences for maintaining persistent state
+ // about a monitored file across process restarts. The |prefs_key| is
+ // typically the filename.
+ static void RegisterPrefs(PrefRegistrySimple* prefs,
+ const base::StringPiece prefs_key);
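+
+ // A minimal sketch, as in the unit tests; |registry| and the key name are
+ // placeholders, and the same key is later passed to RegisterSource():
+ //
+ //   FileMetricsProvider::RegisterPrefs(registry, "BrowserMetrics");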
+
+ // Sets the task runner to use for testing.
+ static void SetTaskRunnerForTesting(
+ const scoped_refptr<base::TaskRunner>& task_runner);
+
+ private:
+ friend class FileMetricsProviderTest;
+
+ // The different results that can occur accessing a file.
+ enum AccessResult {
+ // File was successfully mapped.
+ ACCESS_RESULT_SUCCESS,
+
+ // File does not exist.
+ ACCESS_RESULT_DOESNT_EXIST,
+
+ // File exists but not modified since last read.
+ ACCESS_RESULT_NOT_MODIFIED,
+
+ // File is not valid: is a directory or zero-size.
+ ACCESS_RESULT_INVALID_FILE,
+
+ // System could not map file into memory.
+ ACCESS_RESULT_SYSTEM_MAP_FAILURE,
+
+ // File had invalid contents.
+ ACCESS_RESULT_INVALID_CONTENTS,
+
+ // File could not be opened.
+ ACCESS_RESULT_NO_OPEN,
+
+ // File contents were internally deleted.
+ ACCESS_RESULT_MEMORY_DELETED,
+
+ // File is scheduled to be tried again later.
+ ACCESS_RESULT_FILTER_TRY_LATER,
+
+ // File was skipped according to filtering rules.
+ ACCESS_RESULT_FILTER_SKIP_FILE,
+
+ // File was skipped because it exceeds the maximum age.
+ ACCESS_RESULT_TOO_OLD,
+
+ // File was skipped because too many files in directory.
+ ACCESS_RESULT_TOO_MANY_FILES,
+
+ // File was skipped because too many bytes in directory.
+ ACCESS_RESULT_TOO_MANY_BYTES,
+
+ // The file was skipped because it's being written by this process.
+ ACCESS_RESULT_THIS_PID,
+
+ // The file had no embedded system profile.
+ ACCESS_RESULT_NO_PROFILE,
+
+ // The file had internal data corruption.
+ ACCESS_RESULT_DATA_CORRUPTION,
+
+ ACCESS_RESULT_MAX
+ };
+
+ // Information about sources being monitored; defined and used exclusively
+ // inside the .cc file.
+ struct SourceInfo;
+ using SourceInfoList = std::list<std::unique_ptr<SourceInfo>>;
+
+ // Records an access result in a histogram.
+ static void RecordAccessResult(AccessResult result);
+
+ // Looks for the next file to read within a directory. Returns true if a
+ // file was found. This is part of CheckAndMergeMetricSourcesOnTaskRunner
+ // and so runs on a thread capable of I/O. The |source| structure will
+ // be internally updated to indicate the next file to be read.
+ static bool LocateNextFileInDirectory(SourceInfo* source);
+
+ // Handles the completion of a source.
+ static void FinishedWithSource(SourceInfo* source, AccessResult result);
+
+ // Checks a list of sources (on a task-runner allowed to do I/O) and merges
+ // any data found within them.
+ static void CheckAndMergeMetricSourcesOnTaskRunner(SourceInfoList* sources);
+
+ // Checks a single source and maps it into memory.
+ static AccessResult CheckAndMapMetricSource(SourceInfo* source);
+
+ // Merges all of the histograms from a |source| to the StatisticsRecorder.
+ static void MergeHistogramDeltasFromSource(SourceInfo* source);
+
+ // Records all histograms from a given source via a snapshot-manager.
+ static void RecordHistogramSnapshotsFromSource(
+ base::HistogramSnapshotManager* snapshot_manager,
+ SourceInfo* source);
+
+ // Calls source filter (if any) and returns the desired action.
+ static AccessResult HandleFilterSource(SourceInfo* source,
+ const base::FilePath& path);
+
+ // Creates a task to check all monitored sources for updates.
+ void ScheduleSourcesCheck();
+
+ // Takes a list of sources checked by an external task and determines what
+ // to do with each.
+ void RecordSourcesChecked(SourceInfoList* checked);
+
+ // Schedules the deletion of a file in the background using the task-runner.
+ void DeleteFileAsync(const base::FilePath& path);
+
+ // Updates the persistent state information to show a source as being read.
+ void RecordSourceAsRead(SourceInfo* source);
+
+ // metrics::MetricsProvider:
+ void OnDidCreateMetricsLog() override;
+ bool ProvideIndependentMetrics(
+ SystemProfileProto* system_profile_proto,
+ base::HistogramSnapshotManager* snapshot_manager) override;
+ bool HasPreviousSessionData() override;
+ void RecordInitialHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) override;
+
+ // base::StatisticsRecorder::HistogramProvider:
+ void MergeHistogramDeltas() override;
+
+ // A task-runner capable of performing I/O.
+ scoped_refptr<base::TaskRunner> task_runner_;
+
+ // A list of sources not currently active that need to be checked for changes.
+ SourceInfoList sources_to_check_;
+
+ // A list of currently active sources to be merged when required.
+ SourceInfoList sources_mapped_;
+
+ // A list of mapped sources that carry an embedded system profile; these
+ // are reported via ProvideIndependentMetrics().
+ SourceInfoList sources_with_profile_;
+
+ // A list of sources for a previous run. These are held separately because
+ // they are not subject to the periodic background checking that handles
+ // metrics for the current run.
+ SourceInfoList sources_for_previous_run_;
+
+ // The preferences-service used to store persistent state about sources.
+ PrefService* pref_service_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ base::WeakPtrFactory<FileMetricsProvider> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_FILE_METRICS_PROVIDER_H_
diff --git a/components/metrics/file_metrics_provider_unittest.cc b/components/metrics/file_metrics_provider_unittest.cc
new file mode 100644
index 0000000..ca5ca9a
--- /dev/null
+++ b/components/metrics/file_metrics_provider_unittest.cc
@@ -0,0 +1,1069 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/file_metrics_provider.h"
+
+#include <functional>
+
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_flattener.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/persistent_system_profile.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace {
+const char kMetricsName[] = "TestMetrics";
+const char kMetricsFilename[] = "file.metrics";
+} // namespace
+
+namespace metrics {
+
+class HistogramFlattenerDeltaRecorder : public base::HistogramFlattener {
+ public:
+ HistogramFlattenerDeltaRecorder() {}
+
+ void RecordDelta(const base::HistogramBase& histogram,
+ const base::HistogramSamples& snapshot) override {
+ // Only remember locally created histograms; they have exactly 2 chars.
+ if (strlen(histogram.histogram_name()) == 2)
+ recorded_delta_histogram_names_.push_back(histogram.histogram_name());
+ }
+
+ std::vector<std::string> GetRecordedDeltaHistogramNames() {
+ return recorded_delta_histogram_names_;
+ }
+
+ private:
+ std::vector<std::string> recorded_delta_histogram_names_;
+
+ DISALLOW_COPY_AND_ASSIGN(HistogramFlattenerDeltaRecorder);
+};
+
+
+class FileMetricsProviderTest : public testing::TestWithParam<bool> {
+ protected:
+ const size_t kSmallFileSize = 64 << 10; // 64 KiB
+ const size_t kLargeFileSize = 2 << 20; // 2 MiB
+
+ enum : int { kMaxCreateHistograms = 10 };
+
+ FileMetricsProviderTest()
+ : create_large_files_(GetParam()),
+ task_runner_(new base::TestSimpleTaskRunner()),
+ thread_task_runner_handle_(task_runner_),
+ statistics_recorder_(
+ base::StatisticsRecorder::CreateTemporaryForTesting()),
+ prefs_(new TestingPrefServiceSimple) {
+ EXPECT_TRUE(temp_dir_.CreateUniqueTempDir());
+ FileMetricsProvider::RegisterPrefs(prefs_->registry(), kMetricsName);
+ FileMetricsProvider::SetTaskRunnerForTesting(task_runner_);
+ }
+
+ ~FileMetricsProviderTest() override {
+ // Clear out any final remaining tasks.
+ task_runner_->RunUntilIdle();
+ FileMetricsProvider::SetTaskRunnerForTesting(nullptr);
+ DCHECK_EQ(0U, filter_actions_remaining_);
+ // If a global histogram allocator exists at this point then it likely
+ // acquired histograms that will continue to point to the released
+ // memory and potentially cause use-after-free memory corruption.
+ DCHECK(!base::GlobalHistogramAllocator::Get());
+ }
+
+ TestingPrefServiceSimple* prefs() { return prefs_.get(); }
+ base::FilePath temp_dir() { return temp_dir_.GetPath(); }
+ base::FilePath metrics_file() {
+ return temp_dir_.GetPath().AppendASCII(kMetricsFilename);
+ }
+
+ FileMetricsProvider* provider() {
+ if (!provider_)
+ provider_.reset(new FileMetricsProvider(prefs()));
+ return provider_.get();
+ }
+
+ void OnDidCreateMetricsLog() {
+ provider()->OnDidCreateMetricsLog();
+ }
+
+ bool HasPreviousSessionData() { return provider()->HasPreviousSessionData(); }
+
+ void MergeHistogramDeltas() {
+ provider()->MergeHistogramDeltas();
+ }
+
+ bool ProvideIndependentMetrics(
+ SystemProfileProto* profile_proto,
+ base::HistogramSnapshotManager* snapshot_manager) {
+ return provider()->ProvideIndependentMetrics(profile_proto,
+ snapshot_manager);
+ }
+
+ void RecordInitialHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) {
+ provider()->RecordInitialHistogramSnapshots(snapshot_manager);
+ }
+
+ size_t GetSnapshotHistogramCount() {
+ // Merge the data from the allocator into the StatisticsRecorder.
+ provider()->MergeHistogramDeltas();
+
+ // Flatten what is known to see what has changed since the last time.
+ HistogramFlattenerDeltaRecorder flattener;
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+ // "true" to the begin() includes histograms held in persistent storage.
+ base::StatisticsRecorder::PrepareDeltas(true, base::Histogram::kNoFlags,
+ base::Histogram::kNoFlags,
+ &snapshot_manager);
+ return flattener.GetRecordedDeltaHistogramNames().size();
+ }
+
+ size_t GetIndependentHistogramCount() {
+ HistogramFlattenerDeltaRecorder flattener;
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+ SystemProfileProto profile_proto;
+ if (!provider()->ProvideIndependentMetrics(&profile_proto,
+ &snapshot_manager)) {
+ return 0;
+ }
+ return flattener.GetRecordedDeltaHistogramNames().size();
+ }
+
+ void CreateGlobalHistograms(int histogram_count) {
+ DCHECK_GT(kMaxCreateHistograms, histogram_count);
+
+ // Create both sparse and normal histograms in the allocator.
+ created_histograms_[0] = base::SparseHistogram::FactoryGet("h0", 0);
+ created_histograms_[0]->Add(0);
+ for (int i = 1; i < histogram_count; ++i) {
+ created_histograms_[i] = base::Histogram::FactoryGet(
+ base::StringPrintf("h%d", i), 1, 100, 10, 0);
+ created_histograms_[i]->Add(i);
+ }
+ }
+
+ void RunTasks() {
+ // Run pending tasks twice: Once for IPC calls, once for replies. Don't
+ // use RunUntilIdle() because that can do more work than desired.
+ task_runner_->RunPendingTasks();
+ task_runner_->RunPendingTasks();
+ }
+
+ void WriteMetricsFile(const base::FilePath& path,
+ base::PersistentHistogramAllocator* metrics) {
+ base::File writer(path,
+ base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
+ // Use DCHECK so the stack-trace will indicate where this was called.
+ DCHECK(writer.IsValid()) << path.value();
+ size_t file_size = create_large_files_ ? metrics->size() : metrics->used();
+ int written = writer.Write(0, (const char*)metrics->data(), file_size);
+ DCHECK_EQ(static_cast<int>(file_size), written);
+ }
+
+ void WriteMetricsFileAtTime(const base::FilePath& path,
+ base::PersistentHistogramAllocator* metrics,
+ base::Time write_time) {
+ WriteMetricsFile(path, metrics);
+ base::TouchFile(path, write_time, write_time);
+ }
+
+ std::unique_ptr<base::PersistentHistogramAllocator>
+ CreateMetricsFileWithHistograms(
+ const base::FilePath& file_path,
+ base::Time write_time,
+ int histogram_count,
+ const std::function<void(base::PersistentHistogramAllocator*)>&
+ callback) {
+ base::GlobalHistogramAllocator::CreateWithLocalMemory(
+ create_large_files_ ? kLargeFileSize : kSmallFileSize,
+ 0, kMetricsName);
+
+ CreateGlobalHistograms(histogram_count);
+
+ std::unique_ptr<base::PersistentHistogramAllocator> histogram_allocator =
+ base::GlobalHistogramAllocator::ReleaseForTesting();
+ callback(histogram_allocator.get());
+
+ WriteMetricsFileAtTime(file_path, histogram_allocator.get(), write_time);
+ return histogram_allocator;
+ }
+
+ std::unique_ptr<base::PersistentHistogramAllocator>
+ CreateMetricsFileWithHistograms(int histogram_count) {
+ return CreateMetricsFileWithHistograms(
+ metrics_file(), base::Time::Now(), histogram_count,
+ [](base::PersistentHistogramAllocator* allocator) {});
+ }
+
+ base::HistogramBase* GetCreatedHistogram(int index) {
+ DCHECK_GT(kMaxCreateHistograms, index);
+ return created_histograms_[index];
+ }
+
+ void SetFilterActions(FileMetricsProvider::Params* params,
+ const FileMetricsProvider::FilterAction* actions,
+ size_t count) {
+ filter_actions_ = actions;
+ filter_actions_remaining_ = count;
+ params->filter = base::Bind(&FileMetricsProviderTest::FilterSourcePath,
+ base::Unretained(this));
+ }
+
+ const bool create_large_files_;
+
+ private:
+ FileMetricsProvider::FilterAction FilterSourcePath(
+ const base::FilePath& path) {
+ DCHECK_LT(0U, filter_actions_remaining_);
+ --filter_actions_remaining_;
+ return *filter_actions_++;
+ }
+
+ scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
+ base::ThreadTaskRunnerHandle thread_task_runner_handle_;
+
+ std::unique_ptr<base::StatisticsRecorder> statistics_recorder_;
+ base::ScopedTempDir temp_dir_;
+ std::unique_ptr<TestingPrefServiceSimple> prefs_;
+ std::unique_ptr<FileMetricsProvider> provider_;
+ base::HistogramBase* created_histograms_[kMaxCreateHistograms];
+
+ const FileMetricsProvider::FilterAction* filter_actions_ = nullptr;
+ size_t filter_actions_remaining_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(FileMetricsProviderTest);
+};
+
+// Run all test cases with both small and large files.
+INSTANTIATE_TEST_CASE_P(SmallAndLargeFiles,
+ FileMetricsProviderTest,
+ testing::Bool());
+
+
+TEST_P(FileMetricsProviderTest, AccessMetrics) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ base::Time metrics_time = base::Time::Now() - base::TimeDelta::FromMinutes(5);
+ std::unique_ptr<base::PersistentHistogramAllocator> histogram_allocator =
+ CreateMetricsFileWithHistograms(2);
+ ASSERT_TRUE(PathExists(metrics_file()));
+ base::TouchFile(metrics_file(), metrics_time, metrics_time);
+
+ // Register the file and allow the "checker" task to run.
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName));
+
+ // Record embedded snapshots via snapshot-manager.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(2U, GetSnapshotHistogramCount());
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+
+ // Make sure a second call to the snapshot-recorder doesn't break anything.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+
+ // The file should have been deleted; recreate it to test the behavior
+ // when the file is not deletable by this process.
+ WriteMetricsFileAtTime(metrics_file(), histogram_allocator.get(),
+ metrics_time);
+
+ // Second full run on the same file should produce nothing.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+
+ // Recreate the file to indicate that it is "new" and must be recorded.
+ metrics_time = metrics_time + base::TimeDelta::FromMinutes(1);
+ WriteMetricsFileAtTime(metrics_file(), histogram_allocator.get(),
+ metrics_time);
+
+ // This run should again have "new" histograms.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(2U, GetSnapshotHistogramCount());
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, AccessTimeLimitedFile) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ base::Time metrics_time = base::Time::Now() - base::TimeDelta::FromHours(5);
+ std::unique_ptr<base::PersistentHistogramAllocator> histogram_allocator =
+ CreateMetricsFileWithHistograms(2);
+ ASSERT_TRUE(PathExists(metrics_file()));
+ base::TouchFile(metrics_file(), metrics_time, metrics_time);
+
+ // Register the file and allow the "checker" task to run.
+ FileMetricsProvider::Params params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName);
+ params.max_age = base::TimeDelta::FromHours(1);
+ provider()->RegisterSource(params);
+
+ // Attempting to access the file should return nothing because it is too old.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, FilterDelaysFile) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ base::Time now_time = base::Time::Now();
+ base::Time metrics_time = now_time - base::TimeDelta::FromMinutes(5);
+ std::unique_ptr<base::PersistentHistogramAllocator> histogram_allocator =
+ CreateMetricsFileWithHistograms(2);
+ ASSERT_TRUE(PathExists(metrics_file()));
+ base::TouchFile(metrics_file(), metrics_time, metrics_time);
+ base::File::Info fileinfo;
+ ASSERT_TRUE(base::GetFileInfo(metrics_file(), &fileinfo));
+ EXPECT_GT(base::Time::Now(), fileinfo.last_modified);
+
+ // Register the file and allow the "checker" task to run.
+ FileMetricsProvider::Params params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName);
+ const FileMetricsProvider::FilterAction actions[] = {
+ FileMetricsProvider::FILTER_TRY_LATER,
+ FileMetricsProvider::FILTER_PROCESS_FILE};
+ SetFilterActions(¶ms, actions, arraysize(actions));
+ provider()->RegisterSource(params);
+
+ // Processing the file should touch it but yield no results. File timestamp
+ // accuracy is limited so compare the touched time to a couple seconds past.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+ ASSERT_TRUE(base::GetFileInfo(metrics_file(), &fileinfo));
+ EXPECT_LT(metrics_time, fileinfo.last_modified);
+ EXPECT_LE(now_time - base::TimeDelta::FromSeconds(2), fileinfo.last_modified);
+
+ // Second full run on the same file should process the file.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(2U, GetSnapshotHistogramCount());
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, FilterSkipsFile) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ base::Time now_time = base::Time::Now();
+ base::Time metrics_time = now_time - base::TimeDelta::FromMinutes(5);
+ std::unique_ptr<base::PersistentHistogramAllocator> histogram_allocator =
+ CreateMetricsFileWithHistograms(2);
+ ASSERT_TRUE(PathExists(metrics_file()));
+ base::TouchFile(metrics_file(), metrics_time, metrics_time);
+ base::File::Info fileinfo;
+ ASSERT_TRUE(base::GetFileInfo(metrics_file(), &fileinfo));
+ EXPECT_GT(base::Time::Now(), fileinfo.last_modified);
+
+ // Register the file and allow the "checker" task to run.
+ FileMetricsProvider::Params params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName);
+ const FileMetricsProvider::FilterAction actions[] = {
+ FileMetricsProvider::FILTER_SKIP_FILE};
+ SetFilterActions(¶ms, actions, arraysize(actions));
+ provider()->RegisterSource(params);
+
+ // Processing the file should delete it.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, AccessDirectory) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ base::GlobalHistogramAllocator::CreateWithLocalMemory(
+ 64 << 10, 0, kMetricsName);
+ base::GlobalHistogramAllocator* allocator =
+ base::GlobalHistogramAllocator::Get();
+ base::HistogramBase* histogram;
+
+ // Create files starting with a timestamp a few minutes back.
+ base::Time base_time = base::Time::Now() - base::TimeDelta::FromMinutes(10);
+
+ // Create some files in an odd order. The files are "touched" with times in
+ // the past to ensure that each file has a later timestamp on disk than the
+ // previous one.
+ base::ScopedTempDir metrics_files;
+ EXPECT_TRUE(metrics_files.CreateUniqueTempDir());
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII(".foo.pma"),
+ allocator, base_time);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("_bar.pma"),
+ allocator, base_time);
+
+ histogram = base::Histogram::FactoryGet("h1", 1, 100, 10, 0);
+ histogram->Add(1);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("a1.pma"),
+ allocator,
+ base_time + base::TimeDelta::FromMinutes(1));
+
+ histogram = base::Histogram::FactoryGet("h2", 1, 100, 10, 0);
+ histogram->Add(2);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("c2.pma"),
+ allocator,
+ base_time + base::TimeDelta::FromMinutes(2));
+
+ histogram = base::Histogram::FactoryGet("h3", 1, 100, 10, 0);
+ histogram->Add(3);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("b3.pma"),
+ allocator,
+ base_time + base::TimeDelta::FromMinutes(3));
+
+ histogram = base::Histogram::FactoryGet("h4", 1, 100, 10, 0);
+ histogram->Add(3);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("d4.pma"),
+ allocator,
+ base_time + base::TimeDelta::FromMinutes(4));
+
+ base::TouchFile(metrics_files.GetPath().AppendASCII("b3.pma"),
+ base_time + base::TimeDelta::FromMinutes(5),
+ base_time + base::TimeDelta::FromMinutes(5));
+
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("baz"), allocator,
+ base_time + base::TimeDelta::FromMinutes(6));
+
+ // The global allocator has to be detached here so that no metrics created
+ // by code called below get stored in it as that would make for potential
+ // use-after-free operations if that code is called again.
+ base::GlobalHistogramAllocator::ReleaseForTesting();
+
+ // Register the file and allow the "checker" task to run.
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_files.GetPath(),
+ FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName));
+
+ // Files should come out in the order: a1, c2, d4, b3. They are recognizable
+ // by the number of histograms contained within each.
+ const uint32_t expect_order[] = {1, 2, 4, 3, 0};
+ for (size_t i = 0; i < arraysize(expect_order); ++i) {
+ // Record embedded snapshots via snapshot-manager.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(expect_order[i], GetSnapshotHistogramCount()) << i;
+ }
+
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("a1.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("c2.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("b3.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("d4.pma")));
+ EXPECT_TRUE(
+ base::PathExists(metrics_files.GetPath().AppendASCII(".foo.pma")));
+ EXPECT_TRUE(
+ base::PathExists(metrics_files.GetPath().AppendASCII("_bar.pma")));
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("baz")));
+}
+
+TEST_P(FileMetricsProviderTest, AccessDirectoryWithInvalidFiles) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ // Create files starting with a timestamp a few minutes back.
+ base::Time base_time = base::Time::Now() - base::TimeDelta::FromMinutes(10);
+
+ base::ScopedTempDir metrics_files;
+ EXPECT_TRUE(metrics_files.CreateUniqueTempDir());
+
+ CreateMetricsFileWithHistograms(
+ metrics_files.GetPath().AppendASCII("h1.pma"),
+ base_time + base::TimeDelta::FromMinutes(1), 1,
+ [](base::PersistentHistogramAllocator* allocator) {
+ allocator->memory_allocator()->SetMemoryState(
+ base::PersistentMemoryAllocator::MEMORY_DELETED);
+ });
+
+ CreateMetricsFileWithHistograms(
+ metrics_files.GetPath().AppendASCII("h2.pma"),
+ base_time + base::TimeDelta::FromMinutes(2), 2,
+ [](base::PersistentHistogramAllocator* allocator) {
+ SystemProfileProto profile_proto;
+ SystemProfileProto::FieldTrial* trial = profile_proto.add_field_trial();
+ trial->set_name_id(123);
+ trial->set_group_id(456);
+
+ PersistentSystemProfile persistent_profile;
+ persistent_profile.RegisterPersistentAllocator(
+ allocator->memory_allocator());
+ persistent_profile.SetSystemProfile(profile_proto, true);
+ });
+
+ CreateMetricsFileWithHistograms(
+ metrics_files.GetPath().AppendASCII("h3.pma"),
+ base_time + base::TimeDelta::FromMinutes(3), 3,
+ [](base::PersistentHistogramAllocator* allocator) {
+ allocator->memory_allocator()->SetMemoryState(
+ base::PersistentMemoryAllocator::MEMORY_DELETED);
+ });
+
+ {
+ base::File empty(metrics_files.GetPath().AppendASCII("h4.pma"),
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE);
+ }
+ base::TouchFile(metrics_files.GetPath().AppendASCII("h4.pma"),
+ base_time + base::TimeDelta::FromMinutes(4),
+ base_time + base::TimeDelta::FromMinutes(4));
+
+ // Register the file and allow the "checker" task to run.
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_files.GetPath(),
+ FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
+ FileMetricsProvider::ASSOCIATE_INTERNAL_PROFILE, kMetricsName));
+
+ // No files yet.
+ EXPECT_EQ(0U, GetIndependentHistogramCount());
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("h1.pma")));
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("h2.pma")));
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("h3.pma")));
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("h4.pma")));
+
+ // H1 should be skipped and H2 available.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(2U, GetIndependentHistogramCount());
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("h1.pma")));
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("h2.pma")));
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("h3.pma")));
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("h4.pma")));
+
+ // Nothing else should be found but the last (valid but empty) file will
+ // stick around to be processed later (should it get expanded).
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetIndependentHistogramCount());
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("h2.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("h3.pma")));
+ EXPECT_TRUE(base::PathExists(metrics_files.GetPath().AppendASCII("h4.pma")));
+}
+
+TEST_P(FileMetricsProviderTest, AccessTimeLimitedDirectory) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ base::GlobalHistogramAllocator::CreateWithLocalMemory(64 << 10, 0,
+ kMetricsName);
+ base::GlobalHistogramAllocator* allocator =
+ base::GlobalHistogramAllocator::Get();
+ base::HistogramBase* histogram;
+
+ // Create one old file and one new file.
+ base::ScopedTempDir metrics_files;
+ EXPECT_TRUE(metrics_files.CreateUniqueTempDir());
+ histogram = base::Histogram::FactoryGet("h1", 1, 100, 10, 0);
+ histogram->Add(1);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("a1.pma"),
+ allocator,
+ base::Time::Now() - base::TimeDelta::FromHours(1));
+
+ histogram = base::Histogram::FactoryGet("h2", 1, 100, 10, 0);
+ histogram->Add(2);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("b2.pma"),
+ allocator, base::Time::Now());
+
+ // The global allocator has to be detached here so that no metrics created
+ // by code called below get stored in it as that would make for potential
+ // use-after-free operations if that code is called again.
+ base::GlobalHistogramAllocator::ReleaseForTesting();
+
+ // Register the file and allow the "checker" task to run.
+ FileMetricsProvider::Params params(
+ metrics_files.GetPath(),
+ FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName);
+ params.max_age = base::TimeDelta::FromMinutes(30);
+ provider()->RegisterSource(params);
+
+ // Only b2, with 2 histograms, should be read.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(2U, GetSnapshotHistogramCount());
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("a1.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("b2.pma")));
+}
+
+TEST_P(FileMetricsProviderTest, AccessCountLimitedDirectory) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ base::GlobalHistogramAllocator::CreateWithLocalMemory(64 << 10, 0,
+ kMetricsName);
+ base::GlobalHistogramAllocator* allocator =
+ base::GlobalHistogramAllocator::Get();
+ base::HistogramBase* histogram;
+
+ // Create one old file and one new file.
+ base::ScopedTempDir metrics_files;
+ EXPECT_TRUE(metrics_files.CreateUniqueTempDir());
+ histogram = base::Histogram::FactoryGet("h1", 1, 100, 10, 0);
+ histogram->Add(1);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("a1.pma"),
+ allocator,
+ base::Time::Now() - base::TimeDelta::FromHours(1));
+
+ histogram = base::Histogram::FactoryGet("h2", 1, 100, 10, 0);
+ histogram->Add(2);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("b2.pma"),
+ allocator, base::Time::Now());
+
+ // The global allocator has to be detached here so that no metrics created
+ // by code called below get stored in it as that would make for potential
+ // use-after-free operations if that code is called again.
+ base::GlobalHistogramAllocator::ReleaseForTesting();
+
+ // Register the file and allow the "checker" task to run.
+ FileMetricsProvider::Params params(
+ metrics_files.GetPath(),
+ FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName);
+ params.max_dir_files = 1;
+ provider()->RegisterSource(params);
+
+ // Only b2, with 2 histograms, should be read.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(2U, GetSnapshotHistogramCount());
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("a1.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("b2.pma")));
+}
+
+TEST_P(FileMetricsProviderTest, AccessSizeLimitedDirectory) {
+ // This only works with large files that are big enough to count.
+ if (!create_large_files_)
+ return;
+
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ size_t file_size_kib = 64;
+ base::GlobalHistogramAllocator::CreateWithLocalMemory(file_size_kib << 10, 0,
+ kMetricsName);
+ base::GlobalHistogramAllocator* allocator =
+ base::GlobalHistogramAllocator::Get();
+ base::HistogramBase* histogram;
+
+ // Create one old file and one new file.
+ base::ScopedTempDir metrics_files;
+ EXPECT_TRUE(metrics_files.CreateUniqueTempDir());
+ histogram = base::Histogram::FactoryGet("h1", 1, 100, 10, 0);
+ histogram->Add(1);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("a1.pma"),
+ allocator,
+ base::Time::Now() - base::TimeDelta::FromHours(1));
+
+ histogram = base::Histogram::FactoryGet("h2", 1, 100, 10, 0);
+ histogram->Add(2);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("b2.pma"),
+ allocator, base::Time::Now());
+
+ // The global allocator has to be detached here so that no metrics created
+ // by code called below get stored in it as that would make for potential
+ // use-after-free operations if that code is called again.
+ base::GlobalHistogramAllocator::ReleaseForTesting();
+
+ // Register the file and allow the "checker" task to run.
+ FileMetricsProvider::Params params(
+ metrics_files.GetPath(),
+ FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName);
+ params.max_dir_kib = file_size_kib + 1;
+ provider()->RegisterSource(params);
+
+ // Only b2, with 2 histograms, should be read.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(2U, GetSnapshotHistogramCount());
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("a1.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("b2.pma")));
+}
+
+TEST_P(FileMetricsProviderTest, AccessFilteredDirectory) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+
+ base::GlobalHistogramAllocator::CreateWithLocalMemory(64 << 10, 0,
+ kMetricsName);
+ base::GlobalHistogramAllocator* allocator =
+ base::GlobalHistogramAllocator::Get();
+ base::HistogramBase* histogram;
+
+ // Create files starting with a timestamp a few minutes back.
+ base::Time base_time = base::Time::Now() - base::TimeDelta::FromMinutes(10);
+
+  // Create some files in an odd order. The files are "touched" to timestamps
+  // in the past so that each file has a later timestamp on disk than the
+  // previous one.
+ base::ScopedTempDir metrics_files;
+ EXPECT_TRUE(metrics_files.CreateUniqueTempDir());
+
+ histogram = base::Histogram::FactoryGet("h1", 1, 100, 10, 0);
+ histogram->Add(1);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("a1.pma"),
+ allocator,
+ base_time + base::TimeDelta::FromMinutes(1));
+
+ histogram = base::Histogram::FactoryGet("h2", 1, 100, 10, 0);
+ histogram->Add(2);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("c2.pma"),
+ allocator,
+ base_time + base::TimeDelta::FromMinutes(2));
+
+ histogram = base::Histogram::FactoryGet("h3", 1, 100, 10, 0);
+ histogram->Add(3);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("b3.pma"),
+ allocator,
+ base_time + base::TimeDelta::FromMinutes(3));
+
+ histogram = base::Histogram::FactoryGet("h4", 1, 100, 10, 0);
+ histogram->Add(3);
+ WriteMetricsFileAtTime(metrics_files.GetPath().AppendASCII("d4.pma"),
+ allocator,
+ base_time + base::TimeDelta::FromMinutes(4));
+
+ base::TouchFile(metrics_files.GetPath().AppendASCII("b3.pma"),
+ base_time + base::TimeDelta::FromMinutes(5),
+ base_time + base::TimeDelta::FromMinutes(5));
+
+ // The global allocator has to be detached here so that no metrics created
+ // by code called below get stored in it as that would make for potential
+ // use-after-free operations if that code is called again.
+ base::GlobalHistogramAllocator::ReleaseForTesting();
+
+ // Register the file and allow the "checker" task to run.
+ FileMetricsProvider::Params params(
+ metrics_files.GetPath(),
+ FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN, kMetricsName);
+ const FileMetricsProvider::FilterAction actions[] = {
+ FileMetricsProvider::FILTER_PROCESS_FILE, // a1
+ FileMetricsProvider::FILTER_TRY_LATER, // c2
+ FileMetricsProvider::FILTER_SKIP_FILE, // d4
+ FileMetricsProvider::FILTER_PROCESS_FILE, // b3
+ FileMetricsProvider::FILTER_PROCESS_FILE}; // c2 (again)
+ SetFilterActions(¶ms, actions, arraysize(actions));
+ provider()->RegisterSource(params);
+
+  // Files should come out in the order: a1, b3, c2. They are recognizable
+  // by the number of histograms contained within each.
+ const uint32_t expect_order[] = {1, 3, 2, 0};
+ for (size_t i = 0; i < arraysize(expect_order); ++i) {
+ // Record embedded snapshots via snapshot-manager.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(expect_order[i], GetSnapshotHistogramCount()) << i;
+ }
+
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("a1.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("c2.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("b3.pma")));
+ EXPECT_FALSE(base::PathExists(metrics_files.GetPath().AppendASCII("d4.pma")));
+}
+
+TEST_P(FileMetricsProviderTest, AccessReadWriteMetrics) {
+ // Create a global histogram allocator that maps to a file.
+ ASSERT_FALSE(PathExists(metrics_file()));
+ base::GlobalHistogramAllocator::CreateWithFile(
+ metrics_file(),
+ create_large_files_ ? kLargeFileSize : kSmallFileSize,
+ 0, kMetricsName);
+ CreateGlobalHistograms(2);
+ ASSERT_TRUE(PathExists(metrics_file()));
+ base::HistogramBase* h0 = GetCreatedHistogram(0);
+ base::HistogramBase* h1 = GetCreatedHistogram(1);
+ DCHECK(h0);
+ DCHECK(h1);
+ std::unique_ptr<base::PersistentHistogramAllocator> histogram_allocator =
+ base::GlobalHistogramAllocator::ReleaseForTesting();
+
+ // Register the file and allow the "checker" task to run.
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ACTIVE_FILE,
+ FileMetricsProvider::ASSOCIATE_CURRENT_RUN));
+
+ // Record embedded snapshots via snapshot-manager.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(2U, GetSnapshotHistogramCount());
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+
+ // Make sure a second call to the snapshot-recorder doesn't break anything.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+
+ // Change a histogram and ensure that it's counted.
+ h0->Add(0);
+ EXPECT_EQ(1U, GetSnapshotHistogramCount());
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+
+ // Change the other histogram and verify.
+ h1->Add(11);
+ EXPECT_EQ(1U, GetSnapshotHistogramCount());
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, AccessInitialMetrics) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+ CreateMetricsFileWithHistograms(2);
+
+ // Register the file and allow the "checker" task to run.
+ ASSERT_TRUE(PathExists(metrics_file()));
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_PREVIOUS_RUN, kMetricsName));
+
+ // Record embedded snapshots via snapshot-manager.
+ ASSERT_TRUE(HasPreviousSessionData());
+ RunTasks();
+ {
+ HistogramFlattenerDeltaRecorder flattener;
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+ RecordInitialHistogramSnapshots(&snapshot_manager);
+ EXPECT_EQ(2U, flattener.GetRecordedDeltaHistogramNames().size());
+ }
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+
+ // A run for normal histograms should produce nothing.
+ CreateMetricsFileWithHistograms(2);
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_EQ(0U, GetSnapshotHistogramCount());
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, AccessEmbeddedProfileMetricsWithoutProfile) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+ CreateMetricsFileWithHistograms(2);
+
+ // Register the file and allow the "checker" task to run.
+ ASSERT_TRUE(PathExists(metrics_file()));
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_INTERNAL_PROFILE, kMetricsName));
+
+ // Record embedded snapshots via snapshot-manager.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ {
+ HistogramFlattenerDeltaRecorder flattener;
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+ SystemProfileProto profile;
+
+ // A read of metrics with internal profiles should return nothing.
+ EXPECT_FALSE(ProvideIndependentMetrics(&profile, &snapshot_manager));
+ }
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, AccessEmbeddedProfileMetricsWithProfile) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+ CreateMetricsFileWithHistograms(
+ metrics_file(), base::Time::Now(), 2,
+ [](base::PersistentHistogramAllocator* allocator) {
+ SystemProfileProto profile_proto;
+ SystemProfileProto::FieldTrial* trial = profile_proto.add_field_trial();
+ trial->set_name_id(123);
+ trial->set_group_id(456);
+
+ PersistentSystemProfile persistent_profile;
+ persistent_profile.RegisterPersistentAllocator(
+ allocator->memory_allocator());
+ persistent_profile.SetSystemProfile(profile_proto, true);
+ });
+
+ // Register the file and allow the "checker" task to run.
+ ASSERT_TRUE(PathExists(metrics_file()));
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_INTERNAL_PROFILE, kMetricsName));
+
+ // Record embedded snapshots via snapshot-manager.
+ OnDidCreateMetricsLog();
+ RunTasks();
+ {
+ HistogramFlattenerDeltaRecorder flattener;
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+ RecordInitialHistogramSnapshots(&snapshot_manager);
+ EXPECT_EQ(0U, flattener.GetRecordedDeltaHistogramNames().size());
+
+ // A read of metrics with internal profiles should return one result.
+ SystemProfileProto profile;
+ EXPECT_TRUE(ProvideIndependentMetrics(&profile, &snapshot_manager));
+ EXPECT_FALSE(ProvideIndependentMetrics(&profile, &snapshot_manager));
+ }
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, AccessEmbeddedFallbackMetricsWithoutProfile) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+ CreateMetricsFileWithHistograms(2);
+
+ // Register the file and allow the "checker" task to run.
+ ASSERT_TRUE(PathExists(metrics_file()));
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_INTERNAL_PROFILE_OR_PREVIOUS_RUN,
+ kMetricsName));
+
+ // Record embedded snapshots via snapshot-manager.
+ ASSERT_TRUE(HasPreviousSessionData());
+ RunTasks();
+ {
+ HistogramFlattenerDeltaRecorder flattener;
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+ RecordInitialHistogramSnapshots(&snapshot_manager);
+ EXPECT_EQ(2U, flattener.GetRecordedDeltaHistogramNames().size());
+
+ // A read of metrics with internal profiles should return nothing.
+ SystemProfileProto profile;
+ EXPECT_FALSE(ProvideIndependentMetrics(&profile, &snapshot_manager));
+ }
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, AccessEmbeddedFallbackMetricsWithProfile) {
+ ASSERT_FALSE(PathExists(metrics_file()));
+ CreateMetricsFileWithHistograms(
+ metrics_file(), base::Time::Now(), 2,
+ [](base::PersistentHistogramAllocator* allocator) {
+ SystemProfileProto profile_proto;
+ SystemProfileProto::FieldTrial* trial = profile_proto.add_field_trial();
+ trial->set_name_id(123);
+ trial->set_group_id(456);
+
+ PersistentSystemProfile persistent_profile;
+ persistent_profile.RegisterPersistentAllocator(
+ allocator->memory_allocator());
+ persistent_profile.SetSystemProfile(profile_proto, true);
+ });
+
+ // Register the file and allow the "checker" task to run.
+ ASSERT_TRUE(PathExists(metrics_file()));
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ metrics_file(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_FILE,
+ FileMetricsProvider::ASSOCIATE_INTERNAL_PROFILE_OR_PREVIOUS_RUN,
+ kMetricsName));
+
+ // Record embedded snapshots via snapshot-manager.
+ EXPECT_FALSE(HasPreviousSessionData());
+ RunTasks();
+ {
+ HistogramFlattenerDeltaRecorder flattener;
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+ RecordInitialHistogramSnapshots(&snapshot_manager);
+ EXPECT_EQ(0U, flattener.GetRecordedDeltaHistogramNames().size());
+
+ // A read of metrics with internal profiles should return one result.
+ SystemProfileProto profile;
+ EXPECT_TRUE(ProvideIndependentMetrics(&profile, &snapshot_manager));
+ EXPECT_FALSE(ProvideIndependentMetrics(&profile, &snapshot_manager));
+ }
+ EXPECT_TRUE(base::PathExists(metrics_file()));
+ OnDidCreateMetricsLog();
+ RunTasks();
+ EXPECT_FALSE(base::PathExists(metrics_file()));
+}
+
+TEST_P(FileMetricsProviderTest, AccessEmbeddedProfileMetricsFromDir) {
+ const int file_count = 3;
+ base::Time file_base_time = base::Time::Now();
+ std::vector<base::FilePath> file_names;
+ for (int i = 0; i < file_count; ++i) {
+ CreateMetricsFileWithHistograms(
+ metrics_file(), base::Time::Now(), 2,
+ [](base::PersistentHistogramAllocator* allocator) {
+ SystemProfileProto profile_proto;
+ SystemProfileProto::FieldTrial* trial =
+ profile_proto.add_field_trial();
+ trial->set_name_id(123);
+ trial->set_group_id(456);
+
+ PersistentSystemProfile persistent_profile;
+ persistent_profile.RegisterPersistentAllocator(
+ allocator->memory_allocator());
+ persistent_profile.SetSystemProfile(profile_proto, true);
+ });
+ ASSERT_TRUE(PathExists(metrics_file()));
+ char new_name[] = "hX";
+ new_name[1] = '1' + i;
+ base::FilePath file_name = temp_dir().AppendASCII(new_name).AddExtension(
+ base::PersistentMemoryAllocator::kFileExtension);
+ base::Time file_time =
+ file_base_time - base::TimeDelta::FromMinutes(file_count - i);
+ base::TouchFile(metrics_file(), file_time, file_time);
+ base::Move(metrics_file(), file_name);
+ file_names.push_back(std::move(file_name));
+ }
+
+ // Register the file and allow the "checker" task to run.
+ provider()->RegisterSource(FileMetricsProvider::Params(
+ temp_dir(), FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
+ FileMetricsProvider::ASSOCIATE_INTERNAL_PROFILE));
+
+ OnDidCreateMetricsLog();
+ RunTasks();
+
+ // A read of metrics with internal profiles should return one result.
+ HistogramFlattenerDeltaRecorder flattener;
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+ SystemProfileProto profile;
+ for (int i = 0; i < file_count; ++i) {
+ EXPECT_TRUE(ProvideIndependentMetrics(&profile, &snapshot_manager)) << i;
+ RunTasks();
+ }
+ EXPECT_FALSE(ProvideIndependentMetrics(&profile, &snapshot_manager));
+
+ OnDidCreateMetricsLog();
+ RunTasks();
+ for (const auto& file_name : file_names)
+ EXPECT_FALSE(base::PathExists(file_name));
+}
+
+} // namespace metrics
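The tests above exercise FileMetricsProvider::Params against temporary directories; as a hedged sketch, this is roughly what a non-test registration of an atomic-directory source could look like. The RegisterBrowserMetricsDir helper, the "BrowserMetrics" prefs key, and the seven-day max_age are illustrative assumptions, not taken from this patch.

#include "base/files/file_path.h"
#include "base/time/time.h"
#include "components/metrics/file_metrics_provider.h"

// Sketch only: registers a directory of atomic histogram files, mirroring the
// Params usage exercised in the unit tests above.
void RegisterBrowserMetricsDir(metrics::FileMetricsProvider* provider,
                               const base::FilePath& metrics_dir) {
  metrics::FileMetricsProvider::Params params(
      metrics_dir,
      metrics::FileMetricsProvider::SOURCE_HISTOGRAMS_ATOMIC_DIR,
      metrics::FileMetricsProvider::ASSOCIATE_INTERNAL_PROFILE,
      "BrowserMetrics");
  // Skip files older than a week; directory pruning is also available via
  // max_dir_files and max_dir_kib, as the tests above show.
  params.max_age = base::TimeDelta::FromDays(7);
  provider->RegisterSource(params);
}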
diff --git a/components/metrics/generate_expired_histograms_array.gni b/components/metrics/generate_expired_histograms_array.gni
new file mode 100644
index 0000000..b452246
--- /dev/null
+++ b/components/metrics/generate_expired_histograms_array.gni
@@ -0,0 +1,52 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Runs the expired histograms array generation script over the given .xml
+# input files to produce a generated header file.
+#
+# Parameters:
+# inputs:
+#     List of file names to read. Each file should be a .xml file with
+# histogram descriptions.
+#
+# namespace (optional):
+# Namespace in which the generated code should be scoped. If left empty,
+# the code will be in the global namespace.
+#
+# header_filename:
+# Name of the generated header file.
+#
+# major_branch_date_filepath:
+# A path to the file with the base date.
+#
+# milestone_filepath:
+# A path to the file with the milestone information.
+#
+template("generate_expired_histograms_array") {
+ action(target_name) {
+ header_filename = "$target_gen_dir/" + invoker.header_filename
+
+ script = "//tools/metrics/histograms/generate_expired_histograms_array.py"
+ outputs = [
+ header_filename,
+ ]
+
+ inputs = invoker.inputs
+ major_branch_date_filepath = invoker.major_branch_date_filepath
+ milestone_filepath = invoker.milestone_filepath
+
+ args = []
+
+ if (defined(invoker.namespace) && invoker.namespace != "") {
+ args += [ "-n" + invoker.namespace ]
+ }
+
+ args += [
+ "-o" + rebase_path(root_gen_dir, root_build_dir),
+ "-H" + rebase_path(header_filename, root_gen_dir),
+ "-d" + rebase_path(major_branch_date_filepath, root_build_dir),
+ "-m" + rebase_path(milestone_filepath, root_build_dir),
+ ] + rebase_path(inputs, root_build_dir)
+ }
+}
diff --git a/components/metrics/gpu/DEPS b/components/metrics/gpu/DEPS
new file mode 100644
index 0000000..c2ff8a0
--- /dev/null
+++ b/components/metrics/gpu/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+content/public/browser",
+ "+gpu/config",
+]
diff --git a/components/metrics/gpu/gpu_metrics_provider.cc b/components/metrics/gpu/gpu_metrics_provider.cc
new file mode 100644
index 0000000..53c15fb
--- /dev/null
+++ b/components/metrics/gpu/gpu_metrics_provider.cc
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/gpu/gpu_metrics_provider.h"
+
+#include "content/public/browser/gpu_data_manager.h"
+#include "gpu/config/gpu_info.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace metrics {
+
+GPUMetricsProvider::GPUMetricsProvider() {
+}
+
+GPUMetricsProvider::~GPUMetricsProvider() {
+}
+
+void GPUMetricsProvider::ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) {
+ SystemProfileProto::Hardware* hardware =
+ system_profile_proto->mutable_hardware();
+
+ const gpu::GPUInfo& gpu_info =
+ content::GpuDataManager::GetInstance()->GetGPUInfo();
+ const gpu::GPUInfo::GPUDevice& active_gpu = gpu_info.active_gpu();
+ SystemProfileProto::Hardware::Graphics* gpu =
+ hardware->mutable_gpu();
+ gpu->set_vendor_id(active_gpu.vendor_id);
+ gpu->set_device_id(active_gpu.device_id);
+ gpu->set_driver_version(active_gpu.driver_version);
+ gpu->set_driver_date(active_gpu.driver_date);
+ gpu->set_gl_vendor(gpu_info.gl_vendor);
+ gpu->set_gl_renderer(gpu_info.gl_renderer);
+}
+
+} // namespace metrics
diff --git a/components/metrics/gpu/gpu_metrics_provider.h b/components/metrics/gpu/gpu_metrics_provider.h
new file mode 100644
index 0000000..581c765
--- /dev/null
+++ b/components/metrics/gpu/gpu_metrics_provider.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_GPU_GPU_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_GPU_GPU_METRICS_PROVIDER_H_
+
+#include "base/macros.h"
+#include "components/metrics/metrics_provider.h"
+
+namespace metrics {
+
+// GPUMetricsProvider provides GPU-related metrics.
+class GPUMetricsProvider : public MetricsProvider {
+ public:
+ GPUMetricsProvider();
+ ~GPUMetricsProvider() override;
+
+ // MetricsProvider:
+ void ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GPUMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_GPU_GPU_METRICS_PROVIDER_H_
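As a quick illustration of the MetricsProvider pattern that GPUMetricsProvider follows, here is a minimal hypothetical provider that fills a single hardware field. The HardwareClassMetricsProvider name and the ReadBoardName() helper are assumptions made for the sketch; only the MetricsProvider interface and the hardware_class field appear elsewhere in this patch.

#include <string>

#include "components/metrics/metrics_provider.h"
#include "third_party/metrics_proto/system_profile.pb.h"

namespace metrics {

// Hypothetical helper; a real provider would read this from the platform.
std::string ReadBoardName() {
  return "example-board";
}

class HardwareClassMetricsProvider : public MetricsProvider {
 public:
  HardwareClassMetricsProvider() = default;
  ~HardwareClassMetricsProvider() override = default;

  // MetricsProvider:
  void ProvideSystemProfileMetrics(
      SystemProfileProto* system_profile_proto) override {
    // Populate one hardware field, just as GPUMetricsProvider populates the
    // Graphics sub-message above.
    system_profile_proto->mutable_hardware()->set_hardware_class(
        ReadBoardName());
  }
};

}  // namespace metrics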
diff --git a/components/metrics/histogram_encoder.cc b/components/metrics/histogram_encoder.cc
new file mode 100644
index 0000000..4d7d945
--- /dev/null
+++ b/components/metrics/histogram_encoder.cc
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/histogram_encoder.h"
+
+#include <memory>
+#include <string>
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/metrics_hashes.h"
+
+using base::SampleCountIterator;
+
+namespace metrics {
+
+void EncodeHistogramDelta(const std::string& histogram_name,
+ const base::HistogramSamples& snapshot,
+ ChromeUserMetricsExtension* uma_proto) {
+ DCHECK_NE(0, snapshot.TotalCount());
+ DCHECK(uma_proto);
+
+ // We will ignore the MAX_INT/infinite value in the last element of range[].
+
+ HistogramEventProto* histogram_proto = uma_proto->add_histogram_event();
+ histogram_proto->set_name_hash(base::HashMetricName(histogram_name));
+ if (snapshot.sum() != 0)
+ histogram_proto->set_sum(snapshot.sum());
+
+ for (std::unique_ptr<SampleCountIterator> it = snapshot.Iterator();
+ !it->Done(); it->Next()) {
+ base::Histogram::Sample min;
+ int64_t max;
+ base::Histogram::Count count;
+ it->Get(&min, &max, &count);
+ HistogramEventProto::Bucket* bucket = histogram_proto->add_bucket();
+ bucket->set_min(min);
+ bucket->set_max(max);
+ // Note: The default for count is 1 in the proto, so omit it in that case.
+ if (count != 1)
+ bucket->set_count(count);
+ }
+
+ // Omit fields to save space (see rules in histogram_event.proto comments).
+ for (int i = 0; i < histogram_proto->bucket_size(); ++i) {
+ HistogramEventProto::Bucket* bucket = histogram_proto->mutable_bucket(i);
+ if (i + 1 < histogram_proto->bucket_size() &&
+ bucket->max() == histogram_proto->bucket(i + 1).min()) {
+ bucket->clear_max();
+ } else if (bucket->max() == bucket->min() + 1) {
+ bucket->clear_min();
+ }
+ }
+}
+
+} // namespace metrics
diff --git a/components/metrics/histogram_encoder.h b/components/metrics/histogram_encoder.h
new file mode 100644
index 0000000..332e907
--- /dev/null
+++ b/components/metrics/histogram_encoder.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines a utility function that records any changes in a given
+// histogram for transmission.
+
+#ifndef COMPONENTS_METRICS_HISTOGRAM_ENCODER_H_
+#define COMPONENTS_METRICS_HISTOGRAM_ENCODER_H_
+
+#include <string>
+
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+
+namespace base {
+class HistogramSamples;
+}
+
+namespace metrics {
+
+// Record any changes (histogram deltas of counts from |snapshot|) into
+// |uma_proto| for the given histogram (|histogram_name|).
+void EncodeHistogramDelta(const std::string& histogram_name,
+ const base::HistogramSamples& snapshot,
+ ChromeUserMetricsExtension* uma_proto);
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_HISTOGRAM_ENCODER_H_
diff --git a/components/metrics/histogram_encoder_unittest.cc b/components/metrics/histogram_encoder_unittest.cc
new file mode 100644
index 0000000..dfe7f84
--- /dev/null
+++ b/components/metrics/histogram_encoder_unittest.cc
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/histogram_encoder.h"
+
+#include <string>
+
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/sample_vector.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+TEST(HistogramEncoder, HistogramBucketFields) {
+ // Create buckets: 1-5, 5-7, 7-8, 8-9, 9-10, 10-11, 11-12.
+ base::BucketRanges ranges(8);
+ ranges.set_range(0, 1);
+ ranges.set_range(1, 5);
+ ranges.set_range(2, 7);
+ ranges.set_range(3, 8);
+ ranges.set_range(4, 9);
+ ranges.set_range(5, 10);
+ ranges.set_range(6, 11);
+ ranges.set_range(7, 12);
+
+ base::SampleVector samples(1, &ranges);
+ samples.Accumulate(3, 1); // Bucket 1-5.
+ samples.Accumulate(6, 1); // Bucket 5-7.
+ samples.Accumulate(8, 1); // Bucket 8-9. (7-8 skipped)
+ samples.Accumulate(10, 1); // Bucket 10-11. (9-10 skipped)
+ samples.Accumulate(11, 1); // Bucket 11-12.
+
+ ChromeUserMetricsExtension uma_proto;
+ EncodeHistogramDelta("Test", samples, &uma_proto);
+
+ const HistogramEventProto& histogram_proto =
+ uma_proto.histogram_event(uma_proto.histogram_event_size() - 1);
+
+ // Buckets with samples: 1-5, 5-7, 8-9, 10-11, 11-12.
+ // Should become: 1-/, 5-7, /-9, 10-/, /-12.
+ ASSERT_EQ(5, histogram_proto.bucket_size());
+
+ // 1-5 becomes 1-/ (max is same as next min).
+ EXPECT_TRUE(histogram_proto.bucket(0).has_min());
+ EXPECT_FALSE(histogram_proto.bucket(0).has_max());
+ EXPECT_EQ(1, histogram_proto.bucket(0).min());
+
+ // 5-7 stays 5-7 (no optimization possible).
+ EXPECT_TRUE(histogram_proto.bucket(1).has_min());
+ EXPECT_TRUE(histogram_proto.bucket(1).has_max());
+ EXPECT_EQ(5, histogram_proto.bucket(1).min());
+ EXPECT_EQ(7, histogram_proto.bucket(1).max());
+
+ // 8-9 becomes /-9 (min is same as max - 1).
+ EXPECT_FALSE(histogram_proto.bucket(2).has_min());
+ EXPECT_TRUE(histogram_proto.bucket(2).has_max());
+ EXPECT_EQ(9, histogram_proto.bucket(2).max());
+
+ // 10-11 becomes 10-/ (both optimizations apply, omit max is prioritized).
+ EXPECT_TRUE(histogram_proto.bucket(3).has_min());
+ EXPECT_FALSE(histogram_proto.bucket(3).has_max());
+ EXPECT_EQ(10, histogram_proto.bucket(3).min());
+
+ // 11-12 becomes /-12 (last record must keep max, min is same as max - 1).
+ EXPECT_FALSE(histogram_proto.bucket(4).has_min());
+ EXPECT_TRUE(histogram_proto.bucket(4).has_max());
+ EXPECT_EQ(12, histogram_proto.bucket(4).max());
+}
+
+} // namespace metrics
diff --git a/components/metrics/log_decoder.cc b/components/metrics/log_decoder.cc
new file mode 100644
index 0000000..1754c27
--- /dev/null
+++ b/components/metrics/log_decoder.cc
@@ -0,0 +1,16 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/log_decoder.h"
+
+#include "third_party/zlib/google/compression_utils.h"
+
+namespace metrics {
+
+bool DecodeLogData(const std::string& compressed_log_data,
+ std::string* log_data) {
+ return compression::GzipUncompress(compressed_log_data, log_data);
+}
+
+} // namespace metrics
diff --git a/components/metrics/log_decoder.h b/components/metrics/log_decoder.h
new file mode 100644
index 0000000..c85037f
--- /dev/null
+++ b/components/metrics/log_decoder.h
@@ -0,0 +1,21 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LOG_DECODER_H_
+#define COMPONENTS_METRICS_LOG_DECODER_H_
+
+#include <string>
+
+namespace metrics {
+
+// Other modules can call this function instead of directly calling gzip. This
+// prevents other modules from having to depend on zlib, or being aware of
+// metrics' use of gzip compression, which is a metrics implementation detail.
+// Returns true on success, false on failure.
+bool DecodeLogData(const std::string& compressed_log_data,
+ std::string* log_data);
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LOG_DECODER_H_
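A small usage sketch, assuming the caller already holds a gzip-compressed, serialized ChromeUserMetricsExtension as produced by the metrics log store; ParseCompressedLog is a hypothetical helper name.

#include <string>

#include "components/metrics/log_decoder.h"
#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"

// Decompresses a stored log and parses it back into the UMA proto.
bool ParseCompressedLog(const std::string& compressed_log_data,
                        metrics::ChromeUserMetricsExtension* uma_proto) {
  std::string serialized;
  if (!metrics::DecodeLogData(compressed_log_data, &serialized))
    return false;  // Not valid gzip-compressed data.
  return uma_proto->ParseFromString(serialized);
}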
diff --git a/components/metrics/log_store.h b/components/metrics/log_store.h
new file mode 100644
index 0000000..9dd1a62
--- /dev/null
+++ b/components/metrics/log_store.h
@@ -0,0 +1,49 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LOG_STORE_H_
+#define COMPONENTS_METRICS_LOG_STORE_H_
+
+#include <string>
+
+namespace metrics {
+
+// Interface for local storage of serialized logs to be reported.
+// It allows consumers to check if there are logs to consume, consume them one
+// at a time by staging and discarding logs, and persist/load the whole set.
+class LogStore {
+ public:
+ // Returns true if there are any logs waiting to be uploaded.
+ virtual bool has_unsent_logs() const = 0;
+
+ // Returns true if there is a log that needs to be, or is being, uploaded.
+ virtual bool has_staged_log() const = 0;
+
+ // The text of the staged log, as a serialized protobuf.
+ // Will trigger a DCHECK if there is no staged log.
+ virtual const std::string& staged_log() const = 0;
+
+ // The SHA1 hash of the staged log.
+ // Will trigger a DCHECK if there is no staged log.
+ virtual const std::string& staged_log_hash() const = 0;
+
+ // Populates staged_log() with the next stored log to send.
+ // The order in which logs are staged is up to the implementor.
+ // The staged_log must remain the same even if additional logs are added.
+ // Should only be called if has_unsent_logs() is true.
+ virtual void StageNextLog() = 0;
+
+ // Discards the staged log.
+ virtual void DiscardStagedLog() = 0;
+
+ // Saves any unsent logs to persistent storage.
+ virtual void PersistUnsentLogs() const = 0;
+
+ // Loads unsent logs from persistent storage.
+ virtual void LoadPersistedUnsentLogs() = 0;
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LOG_STORE_H_
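The interface above implies a stage/upload/discard cycle; the sketch below spells it out using only the methods declared here. DrainLogs and UploadSynchronously are hypothetical names, and a real uploader would be asynchronous and retry-aware rather than a blocking loop.

#include <string>

#include "components/metrics/log_store.h"

// Hypothetical transport hook supplied by the embedder.
bool UploadSynchronously(const std::string& log, const std::string& hash);

void DrainLogs(metrics::LogStore* store) {
  store->LoadPersistedUnsentLogs();
  while (store->has_unsent_logs()) {
    if (!store->has_staged_log())
      store->StageNextLog();
    // Only discard the staged log once it has actually been sent.
    if (!UploadSynchronously(store->staged_log(), store->staged_log_hash()))
      break;
    store->DiscardStagedLog();
  }
  store->PersistUnsentLogs();
}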
diff --git a/components/metrics/machine_id_provider.h b/components/metrics/machine_id_provider.h
new file mode 100644
index 0000000..b7a2704
--- /dev/null
+++ b/components/metrics/machine_id_provider.h
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_MACHINE_ID_PROVIDER_H_
+#define COMPONENTS_METRICS_MACHINE_ID_PROVIDER_H_
+
+#include <string>
+
+#include "base/macros.h"
+
+namespace metrics {
+
+// Provides machine characteristics used as a machine id. The implementation is
+// platform specific. GetMachineId() must be called on a thread which allows
+// I/O. GetMachineId() must not be called if HasId() returns false on this
+// platform.
+class MachineIdProvider {
+ public:
+ // Returns true if this platform provides a non-empty GetMachineId(). This is
+ // useful to avoid an async call to GetMachineId() on platforms with no
+ // implementation.
+ static bool HasId();
+
+ // Get a string containing machine characteristics, to be used as a machine
+ // id. The implementation is platform specific, with a default implementation
+ // returning an empty string.
+ // The return value should not be stored to disk or transmitted.
+ static std::string GetMachineId();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MachineIdProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_MACHINE_ID_PROVIDER_H_
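A short sketch of the call pattern the class comment requires: check HasId() before calling GetMachineId(), and only from a context that may block on I/O. MaybeGetMachineId is an illustrative name, not part of this patch.

#include <string>

#include "components/metrics/machine_id_provider.h"

std::string MaybeGetMachineId() {
  // The stub implementation returns false here, so callers must check first.
  if (!metrics::MachineIdProvider::HasId())
    return std::string();
  // Must run on a thread that allows blocking I/O (see class comment).
  return metrics::MachineIdProvider::GetMachineId();
}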
diff --git a/components/metrics/machine_id_provider_stub.cc b/components/metrics/machine_id_provider_stub.cc
new file mode 100644
index 0000000..d747209
--- /dev/null
+++ b/components/metrics/machine_id_provider_stub.cc
@@ -0,0 +1,22 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/machine_id_provider.h"
+
+#include "base/logging.h"
+
+namespace metrics {
+
+// static
+bool MachineIdProvider::HasId() {
+ return false;
+}
+
+// static
+std::string MachineIdProvider::GetMachineId() {
+ NOTREACHED();
+ return std::string();
+}
+
+} // namespace metrics
diff --git a/components/metrics/machine_id_provider_win.cc b/components/metrics/machine_id_provider_win.cc
new file mode 100644
index 0000000..eb7b0d7
--- /dev/null
+++ b/components/metrics/machine_id_provider_win.cc
@@ -0,0 +1,99 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/machine_id_provider.h"
+
+#include <windows.h>
+#include <stdint.h>
+#include <winioctl.h>
+
+#include "base/base_paths.h"
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/win/scoped_handle.h"
+
+namespace metrics {
+
+// static
+bool MachineIdProvider::HasId() {
+ return true;
+}
+
+// On Windows, the machine id is based on the serial number of the drive Chrome
+// is running from.
+// static
+std::string MachineIdProvider::GetMachineId() {
+ base::ScopedBlockingCall scoped_blocking_call(base::BlockingType::MAY_BLOCK);
+
+ // Use the program's path to get the drive used for the machine id. This means
+ // that whenever the underlying drive changes, it's considered a new machine.
+ // This is fine as we do not support migrating Chrome installs to new drives.
+ base::FilePath executable_path;
+
+ if (!base::PathService::Get(base::FILE_EXE, &executable_path)) {
+ NOTREACHED();
+ return std::string();
+ }
+
+ std::vector<base::FilePath::StringType> path_components;
+ executable_path.GetComponents(&path_components);
+ if (path_components.empty()) {
+ NOTREACHED();
+ return std::string();
+ }
+ base::FilePath::StringType drive_name = L"\\\\.\\" + path_components[0];
+
+ base::win::ScopedHandle drive_handle(
+ CreateFile(drive_name.c_str(), 0, FILE_SHARE_READ | FILE_SHARE_WRITE,
+ nullptr, OPEN_EXISTING, 0, nullptr));
+
+ STORAGE_PROPERTY_QUERY query = {};
+ query.PropertyId = StorageDeviceProperty;
+ query.QueryType = PropertyStandardQuery;
+
+ // Perform an initial query to get the number of bytes being returned.
+ DWORD bytes_returned;
+ STORAGE_DESCRIPTOR_HEADER header = {};
+ BOOL status = DeviceIoControl(
+ drive_handle.Get(), IOCTL_STORAGE_QUERY_PROPERTY, &query,
+ sizeof(STORAGE_PROPERTY_QUERY), &header,
+ sizeof(STORAGE_DESCRIPTOR_HEADER), &bytes_returned, nullptr);
+
+ if (!status)
+ return std::string();
+
+ // Query for the actual serial number.
+ std::vector<int8_t> output_buf(header.Size);
+ status =
+ DeviceIoControl(drive_handle.Get(), IOCTL_STORAGE_QUERY_PROPERTY, &query,
+ sizeof(STORAGE_PROPERTY_QUERY), &output_buf[0],
+ output_buf.size(), &bytes_returned, nullptr);
+
+ if (!status)
+ return std::string();
+
+ const STORAGE_DEVICE_DESCRIPTOR* device_descriptor =
+ reinterpret_cast<STORAGE_DEVICE_DESCRIPTOR*>(&output_buf[0]);
+
+ // The serial number is stored in the |output_buf| as a null-terminated
+ // string starting at the specified offset.
+ const DWORD offset = device_descriptor->SerialNumberOffset;
+ if (offset >= output_buf.size())
+ return std::string();
+
+ // Make sure that the null-terminator exists.
+ const std::vector<int8_t>::iterator serial_number_begin =
+ output_buf.begin() + offset;
+ const std::vector<int8_t>::iterator null_location =
+ std::find(serial_number_begin, output_buf.end(), '\0');
+ if (null_location == output_buf.end())
+ return std::string();
+
+ const char* serial_number =
+ reinterpret_cast<const char*>(&output_buf[offset]);
+
+ return std::string(serial_number);
+}
+} // namespace metrics
diff --git a/components/metrics/machine_id_provider_win_unittest.cc b/components/metrics/machine_id_provider_win_unittest.cc
new file mode 100644
index 0000000..6bcd0c4
--- /dev/null
+++ b/components/metrics/machine_id_provider_win_unittest.cc
@@ -0,0 +1,21 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/machine_id_provider.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+TEST(MachineIdProviderTest, GetId) {
+ EXPECT_TRUE(MachineIdProvider::HasId());
+
+ const std::string id1 = MachineIdProvider::GetMachineId();
+ EXPECT_NE(std::string(), id1);
+
+ const std::string id2 = MachineIdProvider::GetMachineId();
+ EXPECT_EQ(id1, id2);
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_log.cc b/components/metrics/metrics_log.cc
new file mode 100644
index 0000000..39ad9b1
--- /dev/null
+++ b/components/metrics/metrics_log.cc
@@ -0,0 +1,350 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_log.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <string>
+
+#include "base/build_time.h"
+#include "base/cpu.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_flattener.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "components/metrics/delegating_provider.h"
+#include "components/metrics/environment_recorder.h"
+#include "components/metrics/histogram_encoder.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_provider.h"
+#include "components/metrics/metrics_service_client.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+#include "third_party/metrics_proto/histogram_event.pb.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+#include "third_party/metrics_proto/user_action_event.pb.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/build_info.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/win/current_module.h"
+#endif
+
+using base::SampleCountIterator;
+
+namespace metrics {
+
+namespace {
+
+// A simple class to write histogram data to a log.
+class IndependentFlattener : public base::HistogramFlattener {
+ public:
+ explicit IndependentFlattener(MetricsLog* log) : log_(log) {}
+
+ // base::HistogramFlattener:
+ void RecordDelta(const base::HistogramBase& histogram,
+ const base::HistogramSamples& snapshot) override {
+ log_->RecordHistogramDelta(histogram.histogram_name(), snapshot);
+ }
+
+ private:
+ MetricsLog* const log_;
+
+ DISALLOW_COPY_AND_ASSIGN(IndependentFlattener);
+};
+
+// Any id less than 16 bytes is considered to be a testing id.
+bool IsTestingID(const std::string& id) {
+ return id.size() < 16;
+}
+
+} // namespace
+
+MetricsLog::MetricsLog(const std::string& client_id,
+ int session_id,
+ LogType log_type,
+ MetricsServiceClient* client)
+ : closed_(false),
+ log_type_(log_type),
+ client_(client),
+ creation_time_(base::TimeTicks::Now()),
+ has_environment_(false) {
+ if (IsTestingID(client_id))
+ uma_proto_.set_client_id(0);
+ else
+ uma_proto_.set_client_id(Hash(client_id));
+
+ uma_proto_.set_session_id(session_id);
+
+ const int32_t product = client_->GetProduct();
+ // Only set the product if it differs from the default value.
+ if (product != uma_proto_.product())
+ uma_proto_.set_product(product);
+
+ SystemProfileProto* system_profile = uma_proto()->mutable_system_profile();
+ RecordCoreSystemProfile(client_, system_profile);
+}
+
+MetricsLog::~MetricsLog() {
+}
+
+// static
+void MetricsLog::RegisterPrefs(PrefRegistrySimple* registry) {
+ EnvironmentRecorder::RegisterPrefs(registry);
+}
+
+// static
+uint64_t MetricsLog::Hash(const std::string& value) {
+ uint64_t hash = base::HashMetricName(value);
+
+  // The following log is VERY helpful when folks add some named histogram into
+  // the code but forget to update the descriptive list of histograms. When
+ // that happens, all we get to see (server side) is a hash of the histogram
+ // name. We can then use this logging to find out what histogram name was
+ // being hashed to a given MD5 value by just running the version of Chromium
+ // in question with --enable-logging.
+ DVLOG(1) << "Metrics: Hash numeric [" << value << "]=[" << hash << "]";
+
+ return hash;
+}
+
+// static
+int64_t MetricsLog::GetBuildTime() {
+ static int64_t integral_build_time = 0;
+ if (!integral_build_time)
+ integral_build_time = static_cast<int64_t>(base::GetBuildTime().ToTimeT());
+ return integral_build_time;
+}
+
+// static
+int64_t MetricsLog::GetCurrentTime() {
+ return (base::TimeTicks::Now() - base::TimeTicks()).InSeconds();
+}
+
+void MetricsLog::RecordUserAction(const std::string& key) {
+ DCHECK(!closed_);
+
+ UserActionEventProto* user_action = uma_proto_.add_user_action_event();
+ user_action->set_name_hash(Hash(key));
+ user_action->set_time_sec(GetCurrentTime());
+}
+
+void MetricsLog::RecordCoreSystemProfile(MetricsServiceClient* client,
+ SystemProfileProto* system_profile) {
+ system_profile->set_build_timestamp(metrics::MetricsLog::GetBuildTime());
+ system_profile->set_app_version(client->GetVersionString());
+ system_profile->set_channel(client->GetChannel());
+ system_profile->set_application_locale(client->GetApplicationLocale());
+
+#if defined(ADDRESS_SANITIZER)
+ system_profile->set_is_asan_build(true);
+#endif
+
+ metrics::SystemProfileProto::Hardware* hardware =
+ system_profile->mutable_hardware();
+#if !defined(OS_IOS) && !defined(STARBOARD)
+ // On iOS, OperatingSystemArchitecture() returns values like iPad4,4 which is
+ // not the actual CPU architecture. Don't set it until the API is fixed. See
+ // crbug.com/370104 for details.
+ hardware->set_cpu_architecture(base::SysInfo::OperatingSystemArchitecture());
+#endif
+ hardware->set_system_ram_mb(base::SysInfo::AmountOfPhysicalMemoryMB());
+ hardware->set_hardware_class(base::SysInfo::HardwareModelName());
+#if defined(OS_WIN)
+ hardware->set_dll_base(reinterpret_cast<uint64_t>(CURRENT_MODULE()));
+#endif
+
+ metrics::SystemProfileProto::OS* os = system_profile->mutable_os();
+// TODO(b/283256747): Remove when base::SysInfo is Starboardized.
+#if !defined(STARBOARD)
+ os->set_name(base::SysInfo::OperatingSystemName());
+ os->set_version(base::SysInfo::OperatingSystemVersion());
+#endif
+#if defined(OS_CHROMEOS)
+ os->set_kernel_version(base::SysInfo::KernelVersion());
+#elif defined(OS_ANDROID)
+ os->set_build_fingerprint(
+ base::android::BuildInfo::GetInstance()->android_build_fp());
+ std::string package_name = client->GetAppPackageName();
+ if (!package_name.empty() && package_name != "com.android.chrome")
+ system_profile->set_app_package_name(package_name);
+#endif
+}
+
+void MetricsLog::RecordHistogramDelta(const std::string& histogram_name,
+ const base::HistogramSamples& snapshot) {
+ DCHECK(!closed_);
+ EncodeHistogramDelta(histogram_name, snapshot, &uma_proto_);
+}
+
+void MetricsLog::RecordPreviousSessionData(
+ DelegatingProvider* delegating_provider) {
+ delegating_provider->ProvidePreviousSessionData(uma_proto());
+}
+
+void MetricsLog::RecordCurrentSessionData(
+ DelegatingProvider* delegating_provider,
+ base::TimeDelta incremental_uptime,
+ base::TimeDelta uptime) {
+ DCHECK(!closed_);
+ DCHECK(has_environment_);
+
+  // Record recent deltas for critical stability metrics. We can't wait for a
+  // restart to gather these, as that delay biases our observation away from
+  // users that run happily for a long time. We send increments with each
+  // UMA log upload, just as we send histogram data.
+ WriteRealtimeStabilityAttributes(incremental_uptime, uptime);
+
+ delegating_provider->ProvideCurrentSessionData(uma_proto());
+}
+
+void MetricsLog::WriteMetricsEnableDefault(EnableMetricsDefault metrics_default,
+ SystemProfileProto* system_profile) {
+ if (client_->IsReportingPolicyManaged()) {
+ // If it's managed, then it must be reporting, otherwise we wouldn't be
+ // sending metrics.
+ system_profile->set_uma_default_state(
+ SystemProfileProto_UmaDefaultState_POLICY_FORCED_ENABLED);
+ return;
+ }
+
+ switch (metrics_default) {
+ case EnableMetricsDefault::DEFAULT_UNKNOWN:
+ // Don't set the field if it's unknown.
+ break;
+ case EnableMetricsDefault::OPT_IN:
+ system_profile->set_uma_default_state(
+ SystemProfileProto_UmaDefaultState_OPT_IN);
+ break;
+ case EnableMetricsDefault::OPT_OUT:
+ system_profile->set_uma_default_state(
+ SystemProfileProto_UmaDefaultState_OPT_OUT);
+ }
+}
+
+void MetricsLog::WriteRealtimeStabilityAttributes(
+ base::TimeDelta incremental_uptime,
+ base::TimeDelta uptime) {
+ // Update the stats which are critical for real-time stability monitoring.
+ // Since these are "optional," only list ones that are non-zero, as the counts
+ // are aggregated (summed) server side.
+
+ SystemProfileProto::Stability* stability =
+ uma_proto()->mutable_system_profile()->mutable_stability();
+
+ const uint64_t incremental_uptime_sec = incremental_uptime.InSeconds();
+ if (incremental_uptime_sec)
+ stability->set_incremental_uptime_sec(incremental_uptime_sec);
+ const uint64_t uptime_sec = uptime.InSeconds();
+ if (uptime_sec)
+ stability->set_uptime_sec(uptime_sec);
+}
+
+const SystemProfileProto& MetricsLog::RecordEnvironment(
+ DelegatingProvider* delegating_provider) {
+ DCHECK(!has_environment_);
+ has_environment_ = true;
+
+ SystemProfileProto* system_profile = uma_proto()->mutable_system_profile();
+
+ WriteMetricsEnableDefault(client_->GetMetricsReportingDefaultState(),
+ system_profile);
+
+ std::string brand_code;
+ if (client_->GetBrand(&brand_code))
+ system_profile->set_brand_code(brand_code);
+
+// TODO(b/283255893): Remove when base::CPU is Starboardized.
+#if !defined(STARBOARD)
+ SystemProfileProto::Hardware::CPU* cpu =
+ system_profile->mutable_hardware()->mutable_cpu();
+ base::CPU cpu_info;
+ cpu->set_vendor_name(cpu_info.vendor_name());
+ cpu->set_signature(cpu_info.signature());
+ cpu->set_num_cores(base::SysInfo::NumberOfProcessors());
+#endif
+
+ delegating_provider->ProvideSystemProfileMetrics(system_profile);
+
+ return *system_profile;
+}
+
+bool MetricsLog::LoadIndependentMetrics(MetricsProvider* metrics_provider) {
+ SystemProfileProto* system_profile = uma_proto()->mutable_system_profile();
+ IndependentFlattener flattener(this);
+ base::HistogramSnapshotManager snapshot_manager(&flattener);
+
+ return metrics_provider->ProvideIndependentMetrics(system_profile,
+ &snapshot_manager);
+}
+
+bool MetricsLog::LoadSavedEnvironmentFromPrefs(PrefService* local_state,
+ std::string* app_version) {
+ DCHECK(!has_environment_);
+ has_environment_ = true;
+ app_version->clear();
+
+ SystemProfileProto* system_profile = uma_proto()->mutable_system_profile();
+ EnvironmentRecorder recorder(local_state);
+ bool success = recorder.LoadEnvironmentFromPrefs(system_profile);
+ if (success)
+ *app_version = system_profile->app_version();
+ return success;
+}
+
+void MetricsLog::CloseLog() {
+ DCHECK(!closed_);
+ closed_ = true;
+}
+
+void MetricsLog::TruncateEvents() {
+ DCHECK(!closed_);
+ if (uma_proto_.user_action_event_size() > internal::kUserActionEventLimit) {
+ UMA_HISTOGRAM_COUNTS_100000("UMA.TruncatedEvents.UserAction",
+ uma_proto_.user_action_event_size());
+ for (int i = internal::kUserActionEventLimit;
+ i < uma_proto_.user_action_event_size(); ++i) {
+ // No histograms.xml entry is added for this histogram because it uses an
+ // enum that is generated from actions.xml in our processing pipelines.
+ // Instead, a histogram description will also be produced in our
+ // pipelines.
+ base::UmaHistogramSparse(
+ "UMA.TruncatedEvents.UserAction.Type",
+ // Truncate the unsigned 64-bit hash to 31 bits, to make it a suitable
+ // histogram sample.
+ uma_proto_.user_action_event(i).name_hash() & 0x7fffffff);
+ }
+ uma_proto_.mutable_user_action_event()->DeleteSubrange(
+ internal::kUserActionEventLimit,
+ uma_proto_.user_action_event_size() - internal::kUserActionEventLimit);
+ }
+
+ if (uma_proto_.omnibox_event_size() > internal::kOmniboxEventLimit) {
+ UMA_HISTOGRAM_COUNTS_100000("UMA.TruncatedEvents.Omnibox",
+ uma_proto_.omnibox_event_size());
+ uma_proto_.mutable_omnibox_event()->DeleteSubrange(
+ internal::kOmniboxEventLimit,
+ uma_proto_.omnibox_event_size() - internal::kOmniboxEventLimit);
+ }
+}
+
+void MetricsLog::GetEncodedLog(std::string* encoded_log) {
+ DCHECK(closed_);
+ uma_proto_.SerializeToString(encoded_log);
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_log.h b/components/metrics/metrics_log.h
new file mode 100644
index 0000000..4cedd66
--- /dev/null
+++ b/components/metrics/metrics_log.h
@@ -0,0 +1,186 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines a set of user experience metrics data recorded by
+// the MetricsService. This is the unit of data that is sent to the server.
+
+#ifndef COMPONENTS_METRICS_METRICS_LOG_H_
+#define COMPONENTS_METRICS_METRICS_LOG_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_service_client.h"
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+
+class PrefService;
+
+namespace base {
+class HistogramSamples;
+}
+
+namespace metrics {
+
+class MetricsProvider;
+class MetricsServiceClient;
+class DelegatingProvider;
+
+namespace internal {
+// Maximum number of events before truncation.
+constexpr int kOmniboxEventLimit = 5000;
+constexpr int kUserActionEventLimit = 5000;
+} // namespace internal
+
+class MetricsLog {
+ public:
+ enum LogType {
+ INITIAL_STABILITY_LOG, // The initial log containing stability stats.
+ ONGOING_LOG, // Subsequent logs in a session.
+ INDEPENDENT_LOG, // An independent log from a previous session.
+ };
+
+ // Creates a new metrics log of the specified type.
+ // |client_id| is the identifier for this profile on this installation
+ // |session_id| is an integer that's incremented on each application launch
+ // |client| is used to interact with the embedder.
+ // |local_state| is the PrefService that this instance should use.
+ // Note: |this| instance does not take ownership of the |client|, but rather
+ // stores a weak pointer to it. The caller should ensure that the |client| is
+ // valid for the lifetime of this class.
+ MetricsLog(const std::string& client_id,
+ int session_id,
+ LogType log_type,
+ MetricsServiceClient* client);
+ virtual ~MetricsLog();
+
+ // Registers local state prefs used by this class.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ // Computes the MD5 hash of the given string, and returns the first 8 bytes of
+ // the hash.
+ static uint64_t Hash(const std::string& value);
+
+ // Get the GMT buildtime for the current binary, expressed in seconds since
+ // January 1, 1970 GMT.
+ // The value is used to identify when a new build is run, so that previous
+ // reliability stats, from other builds, can be abandoned.
+ static int64_t GetBuildTime();
+
+ // Convenience function to return the current time at a resolution in seconds.
+ // This wraps base::TimeTicks, and hence provides an abstract time that is
+ // always incrementing for use in measuring time durations.
+ static int64_t GetCurrentTime();
+
+ // Record core profile settings into the SystemProfileProto.
+ static void RecordCoreSystemProfile(MetricsServiceClient* client,
+ SystemProfileProto* system_profile);
+
+ // Records a user-initiated action.
+ void RecordUserAction(const std::string& key);
+
+ // Record any changes in a given histogram for transmission.
+ void RecordHistogramDelta(const std::string& histogram_name,
+ const base::HistogramSamples& snapshot);
+
+ // TODO(rkaplow): I think this can be a little refactored as it currently
+ // records a pretty arbitrary set of things.
+ // Records the current operating environment, including metrics provided by
+ // the specified |delegating_provider|. The current environment is
+ // returned as a SystemProfileProto.
+ const SystemProfileProto& RecordEnvironment(
+ DelegatingProvider* delegating_provider);
+
+ // Loads a saved system profile and the associated metrics into the log.
+ // Returns true on success. Keep calling it with fresh logs until it returns
+ // false.
+ bool LoadIndependentMetrics(MetricsProvider* metrics_provider);
+
+ // Loads the environment proto that was saved by the last RecordEnvironment()
+ // call from prefs. On success, returns true and |app_version| contains the
+ // recovered version. Otherwise (if there was no saved environment in prefs
+ // or it could not be decoded), returns false and |app_version| is empty.
+ bool LoadSavedEnvironmentFromPrefs(PrefService* local_state,
+ std::string* app_version);
+
+ // Record data from providers about the previous session into the log.
+ void RecordPreviousSessionData(DelegatingProvider* delegating_provider);
+
+ // Record data from providers about the current session into the log.
+ void RecordCurrentSessionData(DelegatingProvider* delegating_provider,
+ base::TimeDelta incremental_uptime,
+ base::TimeDelta uptime);
+
+ // Stop writing to this record and generate the encoded representation.
+ // None of the Record* methods can be called after this is called.
+ void CloseLog();
+
+ // Truncate some of the fields within the log that we want to restrict in
+ // size due to bandwidth concerns.
+ void TruncateEvents();
+
+ // Fills |encoded_log| with the serialized protobuf representation of the
+ // record. Must only be called after CloseLog() has been called.
+ void GetEncodedLog(std::string* encoded_log);
+
+ const base::TimeTicks& creation_time() const {
+ return creation_time_;
+ }
+
+ LogType log_type() const { return log_type_; }
+
+ protected:
+ // Exposed for the sake of mocking/accessing in test code.
+
+ ChromeUserMetricsExtension* uma_proto() { return &uma_proto_; }
+
+  // Exposed to allow subclasses to access and export the uma_proto. Can be
+  // used by external components to export logs to Chrome.
+ const ChromeUserMetricsExtension* uma_proto() const {
+ return &uma_proto_;
+ }
+
+ private:
+ // Write the default state of the enable metrics checkbox.
+ void WriteMetricsEnableDefault(EnableMetricsDefault metrics_default,
+ SystemProfileProto* system_profile);
+
+  // Within the stability group, write attributes that need to be updated ASAP
+  // and can't be delayed until the user decides to restart Chromium.
+  // Delaying these stats would bias metrics away from happy, long-lived
+  // Chromium processes (ones that don't crash and keep on running).
+ void WriteRealtimeStabilityAttributes(base::TimeDelta incremental_uptime,
+ base::TimeDelta uptime);
+
+ // closed_ is true when the record has been packed up for sending, and should
+ // no longer be written to. It is only used for sanity checking.
+ bool closed_;
+
+ // The type of the log, i.e. initial or ongoing.
+ const LogType log_type_;
+
+ // Stores the protocol buffer representation for this log.
+ ChromeUserMetricsExtension uma_proto_;
+
+ // Used to interact with the embedder. Weak pointer; must outlive |this|
+ // instance.
+ MetricsServiceClient* const client_;
+
+ // The time when the current log was created.
+ const base::TimeTicks creation_time_;
+
+ // True if the environment has already been filled in by a call to
+ // RecordEnvironment() or LoadSavedEnvironmentFromPrefs().
+ bool has_environment_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsLog);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_LOG_H_
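For orientation, here is a minimal sketch of the lifecycle implied by the declarations above, assuming the TestMetricsServiceClient and DelegatingProvider helpers defined elsewhere in this import (a real embedder would supply its own MetricsServiceClient):

// Illustrative sketch of the MetricsLog lifecycle.
#include <string>

#include "base/time/time.h"
#include "components/metrics/delegating_provider.h"
#include "components/metrics/metrics_log.h"
#include "components/metrics/test_metrics_service_client.h"

void BuildAndSerializeOneLog() {
  metrics::TestMetricsServiceClient client;
  metrics::MetricsLog log("client id", /*session_id=*/1,
                          metrics::MetricsLog::ONGOING_LOG, &client);

  // Fill in the environment (system profile) and current-session data.
  metrics::DelegatingProvider providers;
  log.RecordEnvironment(&providers);
  log.RecordCurrentSessionData(&providers, base::TimeDelta(),
                               base::TimeDelta());

  // Bound event counts, freeze the log, and serialize it for storage.
  log.TruncateEvents();
  log.CloseLog();  // No Record* calls are allowed after this point.
  std::string encoded;
  log.GetEncodedLog(&encoded);  // Serialized ChromeUserMetricsExtension.
}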
diff --git a/components/metrics/metrics_log_manager.cc b/components/metrics/metrics_log_manager.cc
new file mode 100644
index 0000000..d90aa7b
--- /dev/null
+++ b/components/metrics/metrics_log_manager.cc
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_log_manager.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/strings/string_util.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/metrics_log_store.h"
+#include "components/metrics/metrics_pref_names.h"
+
+namespace metrics {
+
+MetricsLogManager::MetricsLogManager() {}
+
+MetricsLogManager::~MetricsLogManager() {}
+
+void MetricsLogManager::BeginLoggingWithLog(std::unique_ptr<MetricsLog> log) {
+ DCHECK(!current_log_);
+ current_log_ = std::move(log);
+}
+
+void MetricsLogManager::FinishCurrentLog(MetricsLogStore* log_store) {
+ DCHECK(current_log_);
+ current_log_->CloseLog();
+ std::string log_data;
+ current_log_->GetEncodedLog(&log_data);
+ if (!log_data.empty())
+ log_store->StoreLog(log_data, current_log_->log_type());
+ current_log_.reset();
+}
+
+void MetricsLogManager::DiscardCurrentLog() {
+ current_log_->CloseLog();
+ current_log_.reset();
+}
+
+void MetricsLogManager::PauseCurrentLog() {
+ DCHECK(!paused_log_);
+ paused_log_ = std::move(current_log_);
+}
+
+void MetricsLogManager::ResumePausedLog() {
+ DCHECK(!current_log_);
+ current_log_ = std::move(paused_log_);
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_log_manager.h b/components/metrics/metrics_log_manager.h
new file mode 100644
index 0000000..3227617
--- /dev/null
+++ b/components/metrics/metrics_log_manager.h
@@ -0,0 +1,65 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_LOG_MANAGER_H_
+#define COMPONENTS_METRICS_METRICS_LOG_MANAGER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "components/metrics/metrics_log.h"
+
+namespace metrics {
+
+class MetricsLogStore;
+
+// Manages all the log objects used by a MetricsService implementation. Keeps
+// track of an in-progress log and a paused log.
+class MetricsLogManager {
+ public:
+ MetricsLogManager();
+ ~MetricsLogManager();
+
+ // Makes |log| the current_log. This should only be called if there is not a
+ // current log.
+ void BeginLoggingWithLog(std::unique_ptr<MetricsLog> log);
+
+ // Returns the in-progress log.
+ MetricsLog* current_log() { return current_log_.get(); }
+
+ // Closes |current_log_|, compresses it, and stores it in the |log_store| for
+ // later, leaving |current_log_| NULL.
+ void FinishCurrentLog(MetricsLogStore* log_store);
+
+ // Closes and discards |current_log|.
+ void DiscardCurrentLog();
+
+ // Sets current_log to NULL, but saves the current log for future use with
+ // ResumePausedLog(). Only one log may be paused at a time.
+ // TODO(stuartmorgan): Pause/resume support is really a workaround for a
+ // design issue in initial log writing; that should be fixed, and pause/resume
+ // removed.
+ void PauseCurrentLog();
+
+ // Restores the previously paused log (if any) to current_log().
+ // This should only be called if there is not a current log.
+ void ResumePausedLog();
+
+ private:
+ // The log that we are still appending to.
+ std::unique_ptr<MetricsLog> current_log_;
+
+ // A paused, previously-current log.
+ std::unique_ptr<MetricsLog> paused_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsLogManager);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_LOG_MANAGER_H_
diff --git a/components/metrics/metrics_log_manager_unittest.cc b/components/metrics/metrics_log_manager_unittest.cc
new file mode 100644
index 0000000..4e3004e
--- /dev/null
+++ b/components/metrics/metrics_log_manager_unittest.cc
@@ -0,0 +1,127 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_log_manager.h"
+
+#include <stddef.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/memory/ptr_util.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/metrics_log_store.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/persisted_logs_metrics_impl.h"
+#include "components/metrics/test_metrics_service_client.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+namespace {
+
+class MetricsLogManagerTest : public testing::Test {
+ public:
+ MetricsLogManagerTest() : log_store_(&pref_service_, 0) {
+ MetricsLogStore::RegisterPrefs(pref_service_.registry());
+ log_store()->LoadPersistedUnsentLogs();
+ }
+ ~MetricsLogManagerTest() override {}
+
+ MetricsLogStore* log_store() { return &log_store_; }
+
+ MetricsLog* CreateLog(MetricsLog::LogType log_type) {
+ return new MetricsLog("id", 0, log_type, &client_);
+ }
+
+ private:
+ TestMetricsServiceClient client_;
+ TestingPrefServiceSimple pref_service_;
+ MetricsLogStore log_store_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsLogManagerTest);
+};
+
+} // namespace
+
+TEST_F(MetricsLogManagerTest, StandardFlow) {
+ MetricsLogManager log_manager;
+
+ // Make sure a new manager has a clean slate.
+ EXPECT_EQ(nullptr, log_manager.current_log());
+
+ // Check that the normal flow works.
+ MetricsLog* initial_log = CreateLog(MetricsLog::INITIAL_STABILITY_LOG);
+ log_manager.BeginLoggingWithLog(base::WrapUnique(initial_log));
+ EXPECT_EQ(initial_log, log_manager.current_log());
+
+ EXPECT_FALSE(log_store()->has_unsent_logs());
+ log_manager.FinishCurrentLog(log_store());
+ EXPECT_EQ(nullptr, log_manager.current_log());
+ EXPECT_TRUE(log_store()->has_unsent_logs());
+
+ MetricsLog* second_log = CreateLog(MetricsLog::ONGOING_LOG);
+ log_manager.BeginLoggingWithLog(base::WrapUnique(second_log));
+ EXPECT_EQ(second_log, log_manager.current_log());
+}
+
+TEST_F(MetricsLogManagerTest, AbandonedLog) {
+ MetricsLogManager log_manager;
+
+ MetricsLog* dummy_log = CreateLog(MetricsLog::INITIAL_STABILITY_LOG);
+ log_manager.BeginLoggingWithLog(base::WrapUnique(dummy_log));
+ EXPECT_EQ(dummy_log, log_manager.current_log());
+
+ log_manager.DiscardCurrentLog();
+ EXPECT_EQ(nullptr, log_manager.current_log());
+}
+
+// Make sure that interjecting logs updates the "current" log correctly.
+TEST_F(MetricsLogManagerTest, InterjectedLog) {
+ MetricsLogManager log_manager;
+
+ MetricsLog* ongoing_log = CreateLog(MetricsLog::ONGOING_LOG);
+ MetricsLog* temp_log = CreateLog(MetricsLog::INITIAL_STABILITY_LOG);
+
+ log_manager.BeginLoggingWithLog(base::WrapUnique(ongoing_log));
+ EXPECT_EQ(ongoing_log, log_manager.current_log());
+
+ log_manager.PauseCurrentLog();
+ EXPECT_EQ(nullptr, log_manager.current_log());
+
+ log_manager.BeginLoggingWithLog(base::WrapUnique(temp_log));
+ EXPECT_EQ(temp_log, log_manager.current_log());
+ log_manager.FinishCurrentLog(log_store());
+ EXPECT_EQ(nullptr, log_manager.current_log());
+
+ log_manager.ResumePausedLog();
+ EXPECT_EQ(ongoing_log, log_manager.current_log());
+}
+
+// Make sure that when one log is interjected by another, that finishing them
+// creates logs of the correct type.
+TEST_F(MetricsLogManagerTest, InterjectedLogPreservesType) {
+ MetricsLogManager log_manager;
+
+ log_manager.BeginLoggingWithLog(
+ base::WrapUnique(CreateLog(MetricsLog::ONGOING_LOG)));
+ log_manager.PauseCurrentLog();
+ log_manager.BeginLoggingWithLog(
+ base::WrapUnique(CreateLog(MetricsLog::INITIAL_STABILITY_LOG)));
+ log_manager.FinishCurrentLog(log_store());
+ log_manager.ResumePausedLog();
+ // Finishing the interjecting initial log should have stored an initial log.
+ EXPECT_EQ(1U, log_store()->initial_log_count());
+ EXPECT_EQ(0U, log_store()->ongoing_log_count());
+
+ // Finishing the interjected ongoing log should store an ongoing log.
+ log_manager.FinishCurrentLog(log_store());
+ EXPECT_EQ(1U, log_store()->initial_log_count());
+ EXPECT_EQ(1U, log_store()->ongoing_log_count());
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_log_store.cc b/components/metrics/metrics_log_store.cc
new file mode 100644
index 0000000..30cbe6c
--- /dev/null
+++ b/components/metrics/metrics_log_store.cc
@@ -0,0 +1,129 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_log_store.h"
+
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/persisted_logs_metrics_impl.h"
+#include "components/prefs/pref_registry_simple.h"
+
+namespace metrics {
+
+namespace {
+
+// The number of "initial" logs to save, and hope to send during a future Chrome
+// session. Initial logs contain crash stats, and are pretty small.
+const size_t kInitialLogsPersistLimit = 20;
+
+// The number of ongoing logs to save persistently, and hope to
+// send during this or future sessions. Note that each log may be pretty
+// large, as presumably the related "initial" log wasn't sent (probably nothing
+// was, as the user was probably off-line). As a result, the log probably kept
+// accumulating while the "initial" log was stalled, and couldn't be sent, so
+// we don't want to save too many of these mega-logs.
+// A "standard shutdown" will create a small log, including just the data that
+// has not yet been transmitted, and it is normal to have exactly one
+// ongoing_log_ at startup.
+const size_t kOngoingLogsPersistLimit = 8;
+
+// The number of bytes of logs to save of each type (initial/ongoing).
+// This ensures that a reasonable amount of history will be stored even if there
+// is a long series of very small logs.
+const size_t kStorageByteLimitPerLogType = 300 * 1000; // ~300kB
+
+} // namespace
+
+// static
+void MetricsLogStore::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterListPref(prefs::kMetricsInitialLogs);
+ registry->RegisterListPref(prefs::kMetricsOngoingLogs);
+}
+
+MetricsLogStore::MetricsLogStore(PrefService* local_state,
+ size_t max_ongoing_log_size)
+ : unsent_logs_loaded_(false),
+ initial_log_queue_(std::unique_ptr<PersistedLogsMetricsImpl>(
+ new PersistedLogsMetricsImpl()),
+ local_state,
+ prefs::kMetricsInitialLogs,
+ kInitialLogsPersistLimit,
+ kStorageByteLimitPerLogType,
+ 0),
+ ongoing_log_queue_(std::unique_ptr<PersistedLogsMetricsImpl>(
+ new PersistedLogsMetricsImpl()),
+ local_state,
+ prefs::kMetricsOngoingLogs,
+ kOngoingLogsPersistLimit,
+ kStorageByteLimitPerLogType,
+ max_ongoing_log_size) {}
+
+MetricsLogStore::~MetricsLogStore() {}
+
+void MetricsLogStore::LoadPersistedUnsentLogs() {
+ initial_log_queue_.LoadPersistedUnsentLogs();
+ ongoing_log_queue_.LoadPersistedUnsentLogs();
+ unsent_logs_loaded_ = true;
+}
+
+void MetricsLogStore::StoreLog(const std::string& log_data,
+ MetricsLog::LogType log_type) {
+ switch (log_type) {
+ case MetricsLog::INITIAL_STABILITY_LOG:
+ initial_log_queue_.StoreLog(log_data);
+ break;
+ case MetricsLog::ONGOING_LOG:
+ case MetricsLog::INDEPENDENT_LOG:
+ ongoing_log_queue_.StoreLog(log_data);
+ break;
+ }
+}
+
+bool MetricsLogStore::has_unsent_logs() const {
+ return initial_log_queue_.has_unsent_logs() ||
+ ongoing_log_queue_.has_unsent_logs();
+}
+
+bool MetricsLogStore::has_staged_log() const {
+ return initial_log_queue_.has_staged_log() ||
+ ongoing_log_queue_.has_staged_log();
+}
+
+const std::string& MetricsLogStore::staged_log() const {
+ return initial_log_queue_.has_staged_log() ? initial_log_queue_.staged_log()
+ : ongoing_log_queue_.staged_log();
+}
+
+const std::string& MetricsLogStore::staged_log_hash() const {
+ return initial_log_queue_.has_staged_log()
+ ? initial_log_queue_.staged_log_hash()
+ : ongoing_log_queue_.staged_log_hash();
+}
+
+void MetricsLogStore::StageNextLog() {
+ DCHECK(!has_staged_log());
+ if (initial_log_queue_.has_unsent_logs())
+ initial_log_queue_.StageNextLog();
+ else
+ ongoing_log_queue_.StageNextLog();
+}
+
+void MetricsLogStore::DiscardStagedLog() {
+ DCHECK(has_staged_log());
+ if (initial_log_queue_.has_staged_log())
+ initial_log_queue_.DiscardStagedLog();
+ else
+ ongoing_log_queue_.DiscardStagedLog();
+ DCHECK(!has_staged_log());
+}
+
+void MetricsLogStore::PersistUnsentLogs() const {
+ DCHECK(unsent_logs_loaded_);
+ if (!unsent_logs_loaded_)
+ return;
+
+ initial_log_queue_.PersistUnsentLogs();
+ ongoing_log_queue_.PersistUnsentLogs();
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_log_store.h b/components/metrics/metrics_log_store.h
new file mode 100644
index 0000000..b9658d6
--- /dev/null
+++ b/components/metrics/metrics_log_store.h
@@ -0,0 +1,67 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_LOG_STORE_H_
+#define COMPONENTS_METRICS_METRICS_LOG_STORE_H_
+
+#include <string>
+
+#include "base/macros.h"
+#include "components/metrics/log_store.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/persisted_logs.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace metrics {
+
+// A LogStore implementation for storing UMA logs.
+// This implementation keeps track of two types of logs, initial and ongoing,
+// each stored in PersistedLogs. It prioritizes staging initial logs over
+// ongoing logs.
+class MetricsLogStore : public LogStore {
+ public:
+ // Constructs a MetricsLogStore that persists data into |local_state|.
+ // If |max_ongoing_log_size| is non-zero, it will not persist ongoing logs
+ // larger than |max_ongoing_log_size| bytes.
+ MetricsLogStore(PrefService* local_state, size_t max_ongoing_log_size);
+ ~MetricsLogStore();
+
+ // Registers local state prefs used by this class.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ // Saves |log_data| as the given type.
+ void StoreLog(const std::string& log_data, MetricsLog::LogType log_type);
+
+ // LogStore:
+ bool has_unsent_logs() const override;
+ bool has_staged_log() const override;
+ const std::string& staged_log() const override;
+ const std::string& staged_log_hash() const override;
+ void StageNextLog() override;
+ void DiscardStagedLog() override;
+ void PersistUnsentLogs() const override;
+ void LoadPersistedUnsentLogs() override;
+
+ // Inspection methods for tests.
+ size_t ongoing_log_count() const { return ongoing_log_queue_.size(); }
+ size_t initial_log_count() const { return initial_log_queue_.size(); }
+
+ private:
+ // Tracks whether unsent logs (if any) have been loaded from the serializer.
+ bool unsent_logs_loaded_;
+
+ // Logs stored with the INITIAL_STABILITY_LOG type that haven't been sent yet.
+ // These logs will be staged first when staging new logs.
+ PersistedLogs initial_log_queue_;
+ // Logs stored with the ONGOING_LOG type that haven't been sent yet.
+ PersistedLogs ongoing_log_queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsLogStore);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_LOG_STORE_H_
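As a rough illustration of how the LogStore interface above is meant to be driven, the sketch below runs the stage/upload/discard loop a reporting service would use, assuming a store already constructed against a PrefService as in the unit tests that follow; Upload() is a hypothetical stand-in for whatever uploader the embedder wires in.

// Illustrative sketch only; Upload is a hypothetical placeholder callback.
#include <string>

#include "components/metrics/metrics_log_store.h"

void DrainStore(metrics::MetricsLogStore* store,
                void (*Upload)(const std::string& log_data,
                               const std::string& log_hash)) {
  store->LoadPersistedUnsentLogs();   // Must happen before staging anything.
  while (store->has_unsent_logs()) {
    store->StageNextLog();            // Initial logs are staged before ongoing.
    Upload(store->staged_log(), store->staged_log_hash());
    store->DiscardStagedLog();        // Drop the log once it has been handed off.
  }
  store->PersistUnsentLogs();         // Flush the remaining state back to prefs.
}

In the real service the upload is asynchronous and the staged log is only discarded after the upload completes; the loop above is compressed for illustration.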
diff --git a/components/metrics/metrics_log_store_unittest.cc b/components/metrics/metrics_log_store_unittest.cc
new file mode 100644
index 0000000..50b78c7
--- /dev/null
+++ b/components/metrics/metrics_log_store_unittest.cc
@@ -0,0 +1,194 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_log_store.h"
+
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/test_metrics_service_client.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+namespace {
+
+class MetricsLogStoreTest : public testing::Test {
+ public:
+ MetricsLogStoreTest() {
+ MetricsLogStore::RegisterPrefs(pref_service_.registry());
+ }
+ ~MetricsLogStoreTest() override {}
+
+ MetricsLog* CreateLog(MetricsLog::LogType log_type) {
+ return new MetricsLog("id", 0, log_type, &client_);
+ }
+
+ // Returns the stored number of logs of the given type.
+ size_t TypeCount(MetricsLog::LogType log_type) {
+ const char* pref = log_type == MetricsLog::INITIAL_STABILITY_LOG
+ ? prefs::kMetricsInitialLogs
+ : prefs::kMetricsOngoingLogs;
+ return pref_service_.GetList(pref)->GetSize();
+ }
+
+ TestMetricsServiceClient client_;
+ TestingPrefServiceSimple pref_service_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MetricsLogStoreTest);
+};
+
+} // namespace
+
+TEST_F(MetricsLogStoreTest, StandardFlow) {
+ MetricsLogStore log_store(&pref_service_, 0);
+ log_store.LoadPersistedUnsentLogs();
+
+ // Make sure a new store has a clean slate.
+ EXPECT_FALSE(log_store.has_staged_log());
+ EXPECT_FALSE(log_store.has_unsent_logs());
+
+ log_store.StoreLog("a", MetricsLog::ONGOING_LOG);
+ EXPECT_TRUE(log_store.has_unsent_logs());
+ EXPECT_FALSE(log_store.has_staged_log());
+
+ log_store.StageNextLog();
+ EXPECT_TRUE(log_store.has_staged_log());
+ EXPECT_FALSE(log_store.staged_log().empty());
+
+ log_store.DiscardStagedLog();
+ EXPECT_FALSE(log_store.has_staged_log());
+ EXPECT_FALSE(log_store.has_unsent_logs());
+}
+
+TEST_F(MetricsLogStoreTest, StoreAndLoad) {
+ // Set up some in-progress logging in a scoped log store simulating the
+ // lead-up to quitting, then persist as would be done on quit.
+ {
+ MetricsLogStore log_store(&pref_service_, 0);
+ log_store.LoadPersistedUnsentLogs();
+ EXPECT_FALSE(log_store.has_unsent_logs());
+ log_store.StoreLog("a", MetricsLog::ONGOING_LOG);
+ log_store.PersistUnsentLogs();
+ EXPECT_EQ(0U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(1U, TypeCount(MetricsLog::ONGOING_LOG));
+ }
+
+ // Relaunch, load, and store more logs.
+ {
+ MetricsLogStore log_store(&pref_service_, 0);
+ log_store.LoadPersistedUnsentLogs();
+ EXPECT_TRUE(log_store.has_unsent_logs());
+ EXPECT_EQ(0U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(1U, TypeCount(MetricsLog::ONGOING_LOG));
+ log_store.StoreLog("x", MetricsLog::INITIAL_STABILITY_LOG);
+ log_store.StageNextLog();
+ log_store.StoreLog("b", MetricsLog::ONGOING_LOG);
+
+ EXPECT_TRUE(log_store.has_unsent_logs());
+ EXPECT_TRUE(log_store.has_staged_log());
+ EXPECT_EQ(0U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(1U, TypeCount(MetricsLog::ONGOING_LOG));
+
+ log_store.PersistUnsentLogs();
+ EXPECT_EQ(1U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(2U, TypeCount(MetricsLog::ONGOING_LOG));
+ }
+
+ // Relaunch and verify that once logs are handled they are not re-persisted.
+ {
+ MetricsLogStore log_store(&pref_service_, 0);
+ log_store.LoadPersistedUnsentLogs();
+ EXPECT_TRUE(log_store.has_unsent_logs());
+
+ log_store.StageNextLog();
+ log_store.DiscardStagedLog();
+ // The initial log should be sent first; update the persisted storage to
+ // verify.
+ log_store.PersistUnsentLogs();
+ EXPECT_EQ(0U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(2U, TypeCount(MetricsLog::ONGOING_LOG));
+
+ // Handle the first ongoing log.
+ log_store.StageNextLog();
+ log_store.DiscardStagedLog();
+ EXPECT_TRUE(log_store.has_unsent_logs());
+
+ // Handle the last log.
+ log_store.StageNextLog();
+ log_store.DiscardStagedLog();
+ EXPECT_FALSE(log_store.has_unsent_logs());
+
+ // Nothing should have changed "on disk" since PersistUnsentLogs hasn't been
+ // called again.
+ EXPECT_EQ(2U, TypeCount(MetricsLog::ONGOING_LOG));
+ // Persist, and make sure nothing is left.
+ log_store.PersistUnsentLogs();
+ EXPECT_EQ(0U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(0U, TypeCount(MetricsLog::ONGOING_LOG));
+ }
+}
+
+TEST_F(MetricsLogStoreTest, StoreStagedOngoingLog) {
+ // Ensure that types are preserved when storing staged logs.
+ MetricsLogStore log_store(&pref_service_, 0);
+ log_store.LoadPersistedUnsentLogs();
+ log_store.StoreLog("a", MetricsLog::ONGOING_LOG);
+ log_store.StageNextLog();
+ log_store.PersistUnsentLogs();
+
+ EXPECT_EQ(0U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(1U, TypeCount(MetricsLog::ONGOING_LOG));
+}
+
+TEST_F(MetricsLogStoreTest, StoreStagedInitialLog) {
+ // Ensure that types are preserved when storing staged logs.
+ MetricsLogStore log_store(&pref_service_, 0);
+ log_store.LoadPersistedUnsentLogs();
+ log_store.StoreLog("b", MetricsLog::INITIAL_STABILITY_LOG);
+ log_store.StageNextLog();
+ log_store.PersistUnsentLogs();
+
+ EXPECT_EQ(1U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(0U, TypeCount(MetricsLog::ONGOING_LOG));
+}
+
+TEST_F(MetricsLogStoreTest, LargeLogDiscarding) {
+ // Set the size threshold very low, to verify that it's honored.
+ MetricsLogStore log_store(&pref_service_, 1);
+ log_store.LoadPersistedUnsentLogs();
+
+ log_store.StoreLog("persisted", MetricsLog::INITIAL_STABILITY_LOG);
+ log_store.StoreLog("not_persisted", MetricsLog::ONGOING_LOG);
+
+ // Only the stability log should be written out, due to the threshold.
+ log_store.PersistUnsentLogs();
+ EXPECT_EQ(1U, TypeCount(MetricsLog::INITIAL_STABILITY_LOG));
+ EXPECT_EQ(0U, TypeCount(MetricsLog::ONGOING_LOG));
+}
+
+TEST_F(MetricsLogStoreTest, DiscardOrder) {
+ // Ensure that the correct log is discarded if new logs are pushed while
+ // a log is staged.
+ MetricsLogStore log_store(&pref_service_, 0);
+ log_store.LoadPersistedUnsentLogs();
+
+ log_store.StoreLog("a", MetricsLog::ONGOING_LOG);
+ log_store.StoreLog("b", MetricsLog::ONGOING_LOG);
+ log_store.StageNextLog();
+ log_store.StoreLog("c", MetricsLog::INITIAL_STABILITY_LOG);
+ EXPECT_EQ(2U, log_store.ongoing_log_count());
+ EXPECT_EQ(1U, log_store.initial_log_count());
+ // Should discard the ongoing log staged earlier.
+ log_store.DiscardStagedLog();
+ EXPECT_EQ(1U, log_store.ongoing_log_count());
+ EXPECT_EQ(1U, log_store.initial_log_count());
+ // Initial log should be staged next.
+ log_store.StageNextLog();
+ log_store.DiscardStagedLog();
+ EXPECT_EQ(1U, log_store.ongoing_log_count());
+ EXPECT_EQ(0U, log_store.initial_log_count());
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_log_unittest.cc b/components/metrics/metrics_log_unittest.cc
new file mode 100644
index 0000000..8af49a8
--- /dev/null
+++ b/components/metrics/metrics_log_unittest.cc
@@ -0,0 +1,360 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_log.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base64.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/sample_vector.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/time/time.h"
+#include "components/metrics/delegating_provider.h"
+#include "components/metrics/environment_recorder.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_state_manager.h"
+#include "components/metrics/test_metrics_provider.h"
+#include "components/metrics/test_metrics_service_client.h"
+#include "components/prefs/pref_service.h"
+#include "components/prefs/testing_pref_service.h"
+#include "components/variations/active_field_trials.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/build_info.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/current_module.h"
+#endif
+
+namespace metrics {
+
+namespace {
+
+const char kClientId[] = "bogus client ID";
+const int kSessionId = 127;
+
+class TestMetricsLog : public MetricsLog {
+ public:
+ TestMetricsLog(const std::string& client_id,
+ int session_id,
+ LogType log_type,
+ MetricsServiceClient* client)
+ : MetricsLog(client_id, session_id, log_type, client) {}
+
+ ~TestMetricsLog() override {}
+
+ const ChromeUserMetricsExtension& uma_proto() const {
+ return *MetricsLog::uma_proto();
+ }
+
+ ChromeUserMetricsExtension* mutable_uma_proto() {
+ return MetricsLog::uma_proto();
+ }
+
+ const SystemProfileProto& system_profile() const {
+ return uma_proto().system_profile();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestMetricsLog);
+};
+
+} // namespace
+
+class MetricsLogTest : public testing::Test {
+ public:
+ MetricsLogTest() {}
+ ~MetricsLogTest() override {}
+
+ protected:
+ // Check that the values in |system_profile| correspond to the test data
+ // defined at the top of this file.
+ void CheckSystemProfile(const SystemProfileProto& system_profile) {
+ EXPECT_EQ(TestMetricsServiceClient::kBrandForTesting,
+ system_profile.brand_code());
+
+ const SystemProfileProto::Hardware& hardware =
+ system_profile.hardware();
+
+ EXPECT_TRUE(hardware.has_cpu());
+ EXPECT_TRUE(hardware.cpu().has_vendor_name());
+ EXPECT_TRUE(hardware.cpu().has_signature());
+ EXPECT_TRUE(hardware.cpu().has_num_cores());
+
+ // TODO(isherman): Verify other data written into the protobuf as a result
+ // of this call.
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MetricsLogTest);
+};
+
+TEST_F(MetricsLogTest, LogType) {
+ TestMetricsServiceClient client;
+ TestingPrefServiceSimple prefs;
+
+ MetricsLog log1("id", 0, MetricsLog::ONGOING_LOG, &client);
+ EXPECT_EQ(MetricsLog::ONGOING_LOG, log1.log_type());
+
+ MetricsLog log2("id", 0, MetricsLog::INITIAL_STABILITY_LOG, &client);
+ EXPECT_EQ(MetricsLog::INITIAL_STABILITY_LOG, log2.log_type());
+}
+
+TEST_F(MetricsLogTest, BasicRecord) {
+ TestMetricsServiceClient client;
+ client.set_version_string("bogus version");
+ TestingPrefServiceSimple prefs;
+ MetricsLog log("totally bogus client ID", 137, MetricsLog::ONGOING_LOG,
+ &client);
+ log.CloseLog();
+
+ std::string encoded;
+ log.GetEncodedLog(&encoded);
+
+ // A couple of fields are hard to mock, so these will be copied over directly
+ // for the expected output.
+ ChromeUserMetricsExtension parsed;
+ ASSERT_TRUE(parsed.ParseFromString(encoded));
+
+ ChromeUserMetricsExtension expected;
+ expected.set_client_id(5217101509553811875); // Hashed bogus client ID
+ expected.set_session_id(137);
+
+ SystemProfileProto* system_profile = expected.mutable_system_profile();
+ system_profile->set_app_version("bogus version");
+ system_profile->set_channel(client.GetChannel());
+ system_profile->set_application_locale(client.GetApplicationLocale());
+
+#if defined(ADDRESS_SANITIZER)
+ system_profile->set_is_asan_build(true);
+#endif
+ metrics::SystemProfileProto::Hardware* hardware =
+ system_profile->mutable_hardware();
+#if !defined(OS_IOS)
+ hardware->set_cpu_architecture(base::SysInfo::OperatingSystemArchitecture());
+#endif
+ hardware->set_system_ram_mb(base::SysInfo::AmountOfPhysicalMemoryMB());
+ hardware->set_hardware_class(base::SysInfo::HardwareModelName());
+#if defined(OS_WIN)
+ hardware->set_dll_base(reinterpret_cast<uint64_t>(CURRENT_MODULE()));
+#endif
+
+ system_profile->mutable_os()->set_name(base::SysInfo::OperatingSystemName());
+ system_profile->mutable_os()->set_version(
+ base::SysInfo::OperatingSystemVersion());
+#if defined(OS_CHROMEOS)
+ system_profile->mutable_os()->set_kernel_version(
+ base::SysInfo::KernelVersion());
+#elif defined(OS_ANDROID)
+ system_profile->mutable_os()->set_build_fingerprint(
+ base::android::BuildInfo::GetInstance()->android_build_fp());
+ system_profile->set_app_package_name("test app");
+#endif
+
+ // Hard to mock.
+ system_profile->set_build_timestamp(
+ parsed.system_profile().build_timestamp());
+
+ EXPECT_EQ(expected.SerializeAsString(), encoded);
+}
+
+TEST_F(MetricsLogTest, HistogramBucketFields) {
+ // Create buckets: 1-5, 5-7, 7-8, 8-9, 9-10, 10-11, 11-12.
+ base::BucketRanges ranges(8);
+ ranges.set_range(0, 1);
+ ranges.set_range(1, 5);
+ ranges.set_range(2, 7);
+ ranges.set_range(3, 8);
+ ranges.set_range(4, 9);
+ ranges.set_range(5, 10);
+ ranges.set_range(6, 11);
+ ranges.set_range(7, 12);
+
+ base::SampleVector samples(1, &ranges);
+ samples.Accumulate(3, 1); // Bucket 1-5.
+ samples.Accumulate(6, 1); // Bucket 5-7.
+ samples.Accumulate(8, 1); // Bucket 8-9. (7-8 skipped)
+ samples.Accumulate(10, 1); // Bucket 10-11. (9-10 skipped)
+ samples.Accumulate(11, 1); // Bucket 11-12.
+
+ TestMetricsServiceClient client;
+ TestingPrefServiceSimple prefs;
+ TestMetricsLog log(kClientId, kSessionId, MetricsLog::ONGOING_LOG, &client);
+ log.RecordHistogramDelta("Test", samples);
+
+ const ChromeUserMetricsExtension& uma_proto = log.uma_proto();
+ const HistogramEventProto& histogram_proto =
+ uma_proto.histogram_event(uma_proto.histogram_event_size() - 1);
+
+ // Buckets with samples: 1-5, 5-7, 8-9, 10-11, 11-12.
+ // Should become: 1-/, 5-7, /-9, 10-/, /-12.
+ ASSERT_EQ(5, histogram_proto.bucket_size());
+
+ // 1-5 becomes 1-/ (max is same as next min).
+ EXPECT_TRUE(histogram_proto.bucket(0).has_min());
+ EXPECT_FALSE(histogram_proto.bucket(0).has_max());
+ EXPECT_EQ(1, histogram_proto.bucket(0).min());
+
+ // 5-7 stays 5-7 (no optimization possible).
+ EXPECT_TRUE(histogram_proto.bucket(1).has_min());
+ EXPECT_TRUE(histogram_proto.bucket(1).has_max());
+ EXPECT_EQ(5, histogram_proto.bucket(1).min());
+ EXPECT_EQ(7, histogram_proto.bucket(1).max());
+
+ // 8-9 becomes /-9 (min is same as max - 1).
+ EXPECT_FALSE(histogram_proto.bucket(2).has_min());
+ EXPECT_TRUE(histogram_proto.bucket(2).has_max());
+ EXPECT_EQ(9, histogram_proto.bucket(2).max());
+
+ // 10-11 becomes 10-/ (both optimizations apply, omit max is prioritized).
+ EXPECT_TRUE(histogram_proto.bucket(3).has_min());
+ EXPECT_FALSE(histogram_proto.bucket(3).has_max());
+ EXPECT_EQ(10, histogram_proto.bucket(3).min());
+
+ // 11-12 becomes /-12 (last record must keep max, min is same as max - 1).
+ EXPECT_FALSE(histogram_proto.bucket(4).has_min());
+ EXPECT_TRUE(histogram_proto.bucket(4).has_max());
+ EXPECT_EQ(12, histogram_proto.bucket(4).max());
+}
+
+TEST_F(MetricsLogTest, RecordEnvironment) {
+ TestMetricsServiceClient client;
+ TestMetricsLog log(kClientId, kSessionId, MetricsLog::ONGOING_LOG, &client);
+
+ DelegatingProvider delegating_provider;
+ log.RecordEnvironment(&delegating_provider);
+ // Check that the system profile on the log has the correct values set.
+ CheckSystemProfile(log.system_profile());
+}
+
+TEST_F(MetricsLogTest, RecordEnvironmentEnableDefault) {
+ TestMetricsServiceClient client;
+ TestMetricsLog log_unknown(kClientId, kSessionId, MetricsLog::ONGOING_LOG,
+ &client);
+
+ DelegatingProvider delegating_provider;
+ log_unknown.RecordEnvironment(&delegating_provider);
+ EXPECT_FALSE(log_unknown.system_profile().has_uma_default_state());
+
+ client.set_enable_default(EnableMetricsDefault::OPT_IN);
+ TestMetricsLog log_opt_in(kClientId, kSessionId, MetricsLog::ONGOING_LOG,
+ &client);
+ log_opt_in.RecordEnvironment(&delegating_provider);
+ EXPECT_TRUE(log_opt_in.system_profile().has_uma_default_state());
+ EXPECT_EQ(SystemProfileProto_UmaDefaultState_OPT_IN,
+ log_opt_in.system_profile().uma_default_state());
+
+ client.set_enable_default(EnableMetricsDefault::OPT_OUT);
+ TestMetricsLog log_opt_out(kClientId, kSessionId, MetricsLog::ONGOING_LOG,
+ &client);
+ log_opt_out.RecordEnvironment(&delegating_provider);
+ EXPECT_TRUE(log_opt_out.system_profile().has_uma_default_state());
+ EXPECT_EQ(SystemProfileProto_UmaDefaultState_OPT_OUT,
+ log_opt_out.system_profile().uma_default_state());
+
+ client.set_reporting_is_managed(true);
+ TestMetricsLog log_managed(kClientId, kSessionId, MetricsLog::ONGOING_LOG,
+ &client);
+ log_managed.RecordEnvironment(&delegating_provider);
+ EXPECT_TRUE(log_managed.system_profile().has_uma_default_state());
+ EXPECT_EQ(SystemProfileProto_UmaDefaultState_POLICY_FORCED_ENABLED,
+ log_managed.system_profile().uma_default_state());
+}
+
+TEST_F(MetricsLogTest, InitialLogStabilityMetrics) {
+ TestMetricsServiceClient client;
+ TestMetricsLog log(kClientId, kSessionId, MetricsLog::INITIAL_STABILITY_LOG,
+ &client);
+ TestMetricsProvider* test_provider = new TestMetricsProvider();
+ DelegatingProvider delegating_provider;
+ delegating_provider.RegisterMetricsProvider(
+ base::WrapUnique<MetricsProvider>(test_provider));
+ log.RecordEnvironment(&delegating_provider);
+ log.RecordPreviousSessionData(&delegating_provider);
+
+ // The test provider should have been called upon to provide initial
+ // stability and regular stability metrics.
+ EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
+ EXPECT_TRUE(test_provider->provide_stability_metrics_called());
+}
+
+TEST_F(MetricsLogTest, OngoingLogStabilityMetrics) {
+ TestMetricsServiceClient client;
+ TestMetricsLog log(kClientId, kSessionId, MetricsLog::ONGOING_LOG, &client);
+ TestMetricsProvider* test_provider = new TestMetricsProvider();
+ DelegatingProvider delegating_provider;
+ delegating_provider.RegisterMetricsProvider(
+ base::WrapUnique<MetricsProvider>(test_provider));
+ log.RecordEnvironment(&delegating_provider);
+ log.RecordCurrentSessionData(&delegating_provider, base::TimeDelta(),
+ base::TimeDelta());
+
+ // The test provider should have been called upon to provide regular but not
+ // initial stability metrics.
+ EXPECT_FALSE(test_provider->provide_initial_stability_metrics_called());
+ EXPECT_TRUE(test_provider->provide_stability_metrics_called());
+}
+
+TEST_F(MetricsLogTest, ChromeChannelWrittenToProtobuf) {
+ TestMetricsServiceClient client;
+ TestMetricsLog log(kClientId, kSessionId, MetricsLog::ONGOING_LOG, &client);
+ EXPECT_TRUE(log.uma_proto().system_profile().has_channel());
+}
+
+TEST_F(MetricsLogTest, ProductNotSetIfDefault) {
+ TestMetricsServiceClient client;
+ EXPECT_EQ(ChromeUserMetricsExtension::CHROME, client.GetProduct());
+ TestMetricsLog log(kClientId, kSessionId, MetricsLog::ONGOING_LOG, &client);
+ // Check that the product isn't set, since it's the default, and also verify
+ // that the default value is indeed equal to Chrome.
+ EXPECT_FALSE(log.uma_proto().has_product());
+ EXPECT_EQ(ChromeUserMetricsExtension::CHROME, log.uma_proto().product());
+}
+
+TEST_F(MetricsLogTest, ProductSetIfNotDefault) {
+ const int32_t kTestProduct = 100;
+ EXPECT_NE(ChromeUserMetricsExtension::CHROME, kTestProduct);
+
+ TestMetricsServiceClient client;
+ client.set_product(kTestProduct);
+ TestMetricsLog log(kClientId, kSessionId, MetricsLog::ONGOING_LOG, &client);
+ // Check that the product is set to |kTestProduct|.
+ EXPECT_TRUE(log.uma_proto().has_product());
+ EXPECT_EQ(kTestProduct, log.uma_proto().product());
+}
+
+TEST_F(MetricsLogTest, TruncateEvents) {
+ TestMetricsServiceClient client;
+ TestMetricsLog log(kClientId, kSessionId, MetricsLog::ONGOING_LOG, &client);
+
+ for (int i = 0; i < internal::kUserActionEventLimit * 2; ++i) {
+ log.RecordUserAction("BasicAction");
+ EXPECT_EQ(i + 1, log.uma_proto().user_action_event_size());
+ }
+ for (int i = 0; i < internal::kOmniboxEventLimit * 2; ++i) {
+ // Add an empty omnibox event. Not fully realistic since these are normally
+ // supplied by a metrics provider.
+ log.mutable_uma_proto()->add_omnibox_event();
+ EXPECT_EQ(i + 1, log.uma_proto().omnibox_event_size());
+ }
+
+ // Truncate, and check that the current size is the limit.
+ log.TruncateEvents();
+ EXPECT_EQ(internal::kUserActionEventLimit,
+ log.uma_proto().user_action_event_size());
+ EXPECT_EQ(internal::kOmniboxEventLimit, log.uma_proto().omnibox_event_size());
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_log_uploader.h b/components/metrics/metrics_log_uploader.h
new file mode 100644
index 0000000..882bab0
--- /dev/null
+++ b/components/metrics/metrics_log_uploader.h
@@ -0,0 +1,46 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_LOG_UPLOADER_H_
+#define COMPONENTS_METRICS_METRICS_LOG_UPLOADER_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace metrics {
+
+class ReportingInfo;
+
+// MetricsLogUploader is an abstract base class for uploading UMA logs on behalf
+// of MetricsService.
+class MetricsLogUploader {
+ public:
+ // Type for OnUploadComplete callbacks. These callbacks will receive three
+ // parameters: a response code, a net error code, and a boolean specifying
+ // whether the connection was secure (over HTTPS).
+ typedef base::Callback<void(int, int, bool)> UploadCallback;
+
+ // Possible service types. This should correspond to a type from
+ // DataUseUserData.
+ enum MetricServiceType {
+ UMA,
+ UKM,
+ };
+
+ virtual ~MetricsLogUploader() {}
+
+ // Uploads a log with the specified |compressed_log_data| and |log_hash|.
+ // |log_hash| is expected to be the hex-encoded SHA1 hash of the log data
+ // before compression.
+ virtual void UploadLog(const std::string& compressed_log_data,
+ const std::string& log_hash,
+ const ReportingInfo& reporting_info) = 0;
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_LOG_UPLOADER_H_
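Implementing this interface only requires overriding UploadLog() and eventually invoking the UploadCallback. The fragment below is a hypothetical no-op uploader sketched for illustration; the class name, constructor shape, and hard-coded response are assumptions, not anything defined by this import.

// Hypothetical uploader that reports every log as delivered successfully.
#include <string>

#include "components/metrics/metrics_log_uploader.h"

namespace metrics {

class FakeLogUploader : public MetricsLogUploader {
 public:
  explicit FakeLogUploader(const UploadCallback& on_upload_complete)
      : on_upload_complete_(on_upload_complete) {}

  void UploadLog(const std::string& compressed_log_data,
                 const std::string& log_hash,
                 const ReportingInfo& reporting_info) override {
    // Pretend the upload succeeded: HTTP 200, no net error, secure connection.
    on_upload_complete_.Run(200, 0, true);
  }

 private:
  UploadCallback on_upload_complete_;
};

}  // namespace metrics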
diff --git a/components/metrics/metrics_pref_names.cc b/components/metrics/metrics_pref_names.cc
new file mode 100644
index 0000000..5dcb253
--- /dev/null
+++ b/components/metrics/metrics_pref_names.cc
@@ -0,0 +1,228 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_pref_names.h"
+
+namespace metrics {
+namespace prefs {
+
+// Set once, to the current epoch time, on the first run of chrome on this
+// machine. Attached to metrics reports forever thereafter.
+const char kInstallDate[] = "uninstall_metrics.installation_date2";
+
+// The metrics client GUID.
+// Note: The name client_id2 is a result of creating
+// new prefs to do a one-time reset of the previous values.
+const char kMetricsClientID[] = "user_experience_metrics.client_id2";
+
+// An enum value indicating the default value of the enable metrics reporting
+// checkbox shown during first-run. If it's opt-in, then the checkbox defaulted
+// to unchecked; if it's opt-out, then it defaulted to checked. This value is
+// only recorded during first-run, so older clients will not set it. The enum
+// used for the value is metrics::MetricsServiceClient::EnableMetricsDefault.
+const char kMetricsDefaultOptIn[] = "user_experience_metrics.default_opt_in";
+
+// Array of dictionaries, each of which is a UMA log that was supposed to be
+// sent in the first minute of a browser session. These logs include things
+// like crash count info, etc.
+const char kMetricsInitialLogs[] = "user_experience_metrics.initial_logs2";
+
+// The metrics entropy source.
+// Note: The name low_entropy_source2 is a result of creating
+// new prefs to do a one-time reset of the previous values.
+const char kMetricsLowEntropySource[] =
+ "user_experience_metrics.low_entropy_source2";
+
+// A machine ID used to detect when underlying hardware changes. It is only
+// stored locally and never transmitted in metrics reports.
+const char kMetricsMachineId[] = "user_experience_metrics.machine_id";
+
+// Array of dictionaries, each of which is a UMA log that was not sent because
+// the browser terminated before these accumulated metrics could be sent. These
+// logs typically include histograms and memory reports, as well as ongoing
+// user activities.
+const char kMetricsOngoingLogs[] = "user_experience_metrics.ongoing_logs2";
+
+// Boolean that indicates a cloned install has been detected and the metrics
+// client id and low entropy source should be reset.
+const char kMetricsResetIds[] = "user_experience_metrics.reset_metrics_ids";
+
+// Boolean that specifies whether or not crash reporting and metrics reporting
+// are sent over the network for analysis.
+const char kMetricsReportingEnabled[] =
+ "user_experience_metrics.reporting_enabled";
+
+// Date/time when the user opted in to UMA and generated the client id most
+// recently (local machine time, stored as a 64-bit time_t value).
+const char kMetricsReportingEnabledTimestamp[] =
+ "user_experience_metrics.client_id_timestamp";
+
+// The metrics client session ID.
+const char kMetricsSessionID[] = "user_experience_metrics.session_id";
+
+// The prefix of the last-seen timestamp for persistent histogram files.
+// Values are named for the files themselves.
+const char kMetricsLastSeenPrefix[] =
+ "user_experience_metrics.last_seen.";
+
+// Number of times the browser has been able to register crash reporting.
+const char kStabilityBreakpadRegistrationSuccess[] =
+ "user_experience_metrics.stability.breakpad_registration_ok";
+
+// Number of times the browser has failed to register crash reporting.
+const char kStabilityBreakpadRegistrationFail[] =
+ "user_experience_metrics.stability.breakpad_registration_fail";
+
+// A timestamp at which the browser was known to be alive. Used to
+// evaluate whether the browser crash was due to a whole system crash.
+// At minimum this is updated each time the "exited_cleanly" preference is
+// modified, but can also be optionally updated on a slow schedule.
+const char kStabilityBrowserLastLiveTimeStamp[] =
+ "user_experience_metrics.stability.browser_last_live_timestamp";
+
+// Total number of child process crashes (other than renderer / extension
+// renderer ones, and plugin children, which are counted separately) since the
+// last report.
+const char kStabilityChildProcessCrashCount[] =
+ "user_experience_metrics.stability.child_process_crash_count";
+
+// Number of times the application exited uncleanly since the last report.
+const char kStabilityCrashCount[] =
+ "user_experience_metrics.stability.crash_count";
+
+// Number of times the application exited uncleanly since the last report
+// without gms core update.
+const char kStabilityCrashCountWithoutGmsCoreUpdate[] =
+ "user_experience_metrics.stability.crash_count_without_gms_core_update";
+
+// Number of times the initial stability log upload was deferred to the next
+// startup.
+const char kStabilityDeferredCount[] =
+ "user_experience_metrics.stability.deferred_count";
+
+// Number of times stability data was discarded. This is accumulated since the
+// last report, even across versions.
+const char kStabilityDiscardCount[] =
+ "user_experience_metrics.stability.discard_count";
+
+// Number of times the browser has been run under a debugger.
+const char kStabilityDebuggerPresent[] =
+ "user_experience_metrics.stability.debugger_present";
+
+// Number of times the browser has not been run under a debugger.
+const char kStabilityDebuggerNotPresent[] =
+ "user_experience_metrics.stability.debugger_not_present";
+
+// An enum value to indicate the execution phase the browser was in.
+const char kStabilityExecutionPhase[] =
+ "user_experience_metrics.stability.execution_phase";
+
+// True if the previous run of the program exited cleanly.
+const char kStabilityExitedCleanly[] =
+ "user_experience_metrics.stability.exited_cleanly";
+
+// Number of times an extension renderer process crashed since the last report.
+const char kStabilityExtensionRendererCrashCount[] =
+ "user_experience_metrics.stability.extension_renderer_crash_count";
+
+// Number of times an extension renderer process failed to launch since the last
+// report.
+const char kStabilityExtensionRendererFailedLaunchCount[] =
+ "user_experience_metrics.stability.extension_renderer_failed_launch_count";
+
+// Number of times an extension renderer process successfully launched since the
+// last report.
+const char kStabilityExtensionRendererLaunchCount[] =
+ "user_experience_metrics.stability.extension_renderer_launch_count";
+
+// The GMS core version used in Chrome.
+const char kStabilityGmsCoreVersion[] =
+ "user_experience_metrics.stability.gms_core_version";
+
+// Number of times the session end did not complete.
+const char kStabilityIncompleteSessionEndCount[] =
+ "user_experience_metrics.stability.incomplete_session_end_count";
+
+// Number of times the application was launched since last report.
+const char kStabilityLaunchCount[] =
+ "user_experience_metrics.stability.launch_count";
+
+// Number of times a page load event occurred since the last report.
+const char kStabilityPageLoadCount[] =
+ "user_experience_metrics.stability.page_load_count";
+
+// Number of times a renderer process crashed since the last report.
+const char kStabilityRendererCrashCount[] =
+ "user_experience_metrics.stability.renderer_crash_count";
+
+// Number of times a renderer process failed to launch since the last report.
+const char kStabilityRendererFailedLaunchCount[] =
+ "user_experience_metrics.stability.renderer_failed_launch_count";
+
+// Number of times the renderer has become non-responsive since the last
+// report.
+const char kStabilityRendererHangCount[] =
+ "user_experience_metrics.stability.renderer_hang_count";
+
+// Number of times a renderer process successfully launched since the last
+// report.
+const char kStabilityRendererLaunchCount[] =
+ "user_experience_metrics.stability.renderer_launch_count";
+
+// Base64 encoded serialized UMA system profile proto from the previous session.
+const char kStabilitySavedSystemProfile[] =
+ "user_experience_metrics.stability.saved_system_profile";
+
+// SHA-1 hash of the serialized UMA system profile proto (hex encoded).
+const char kStabilitySavedSystemProfileHash[] =
+ "user_experience_metrics.stability.saved_system_profile_hash";
+
+// False if we received a session end and either we crashed during processing
+// the session end or ran out of time and windows terminated us.
+const char kStabilitySessionEndCompleted[] =
+ "user_experience_metrics.stability.session_end_completed";
+
+// Build time, in seconds since an epoch, which is used to assure that stability
+// metrics reported reflect stability of the same build.
+const char kStabilityStatsBuildTime[] =
+ "user_experience_metrics.stability.stats_buildtime";
+
+// Version string of previous run, which is used to assure that stability
+// metrics reported under current version reflect stability of the same version.
+const char kStabilityStatsVersion[] =
+ "user_experience_metrics.stability.stats_version";
+
+// Number of times the application exited uncleanly and the system session
+// embedding the browser session ended abnormally since the last report.
+// Windows only.
+const char kStabilitySystemCrashCount[] =
+ "user_experience_metrics.stability.system_crash_count";
+
+// Number of times the version number stored in prefs did not match the
+// serialized system profile version number.
+const char kStabilityVersionMismatchCount[] =
+ "user_experience_metrics.stability.version_mismatch_count";
+
+// The keys below are strictly increasing counters over the lifetime of
+// a chrome installation. They are (optionally) sent up to the uninstall
+// survey in the event of uninstallation.
+const char kUninstallLaunchCount[] = "uninstall_metrics.launch_count";
+const char kUninstallMetricsPageLoadCount[] =
+ "uninstall_metrics.page_load_count";
+const char kUninstallMetricsUptimeSec[] = "uninstall_metrics.uptime_sec";
+
+// Dictionary for measuring cellular data used by UKM service during last 7
+// days.
+const char kUkmCellDataUse[] = "user_experience_metrics.ukm_cell_datause";
+
+// Dictionary for measuring cellular data used by UMA service during last 7
+// days.
+const char kUmaCellDataUse[] = "user_experience_metrics.uma_cell_datause";
+
+// Dictionary for measuring cellular data used by the user, including chrome
+// services, per day.
+const char kUserCellDataUse[] = "user_experience_metrics.user_call_datause";
+
+} // namespace prefs
+} // namespace metrics
diff --git a/components/metrics/metrics_pref_names.h b/components/metrics/metrics_pref_names.h
new file mode 100644
index 0000000..2024c49
--- /dev/null
+++ b/components/metrics/metrics_pref_names.h
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_PREF_NAMES_H_
+#define COMPONENTS_METRICS_METRICS_PREF_NAMES_H_
+
+namespace metrics {
+namespace prefs {
+
+// Alphabetical list of preference names specific to the metrics
+// component. Document each in the .cc file.
+extern const char kDeprecatedMetricsInitialLogs[];
+extern const char kDeprecatedMetricsOngoingLogs[];
+extern const char kInstallDate[];
+extern const char kMetricsClientID[];
+extern const char kMetricsDefaultOptIn[];
+extern const char kMetricsInitialLogs[];
+extern const char kMetricsLowEntropySource[];
+extern const char kMetricsMachineId[];
+extern const char kMetricsOngoingLogs[];
+extern const char kMetricsResetIds[];
+
+// For finding out whether metrics and crash reporting is enabled use the
+// relevant embedder-specific subclass of MetricsServiceAccessor instead of
+// reading this pref directly; see the comments on metrics_service_accessor.h.
+// (NOTE: If within //chrome, use
+// ChromeMetricsServiceAccessor::IsMetricsAndCrashReportingEnabled()).
+extern const char kMetricsReportingEnabled[];
+extern const char kMetricsReportingEnabledTimestamp[];
+extern const char kMetricsSessionID[];
+extern const char kMetricsLastSeenPrefix[];
+
+// Preferences for recording stability logs.
+extern const char kStabilityBreakpadRegistrationFail[];
+extern const char kStabilityBreakpadRegistrationSuccess[];
+extern const char kStabilityBrowserLastLiveTimeStamp[];
+extern const char kStabilityChildProcessCrashCount[];
+extern const char kStabilityCrashCount[];
+extern const char kStabilityCrashCountWithoutGmsCoreUpdate[];
+extern const char kStabilityDebuggerNotPresent[];
+extern const char kStabilityDebuggerPresent[];
+extern const char kStabilityDeferredCount[];
+extern const char kStabilityDiscardCount[];
+extern const char kStabilityExecutionPhase[];
+extern const char kStabilityExitedCleanly[];
+extern const char kStabilityExtensionRendererCrashCount[];
+extern const char kStabilityExtensionRendererFailedLaunchCount[];
+extern const char kStabilityExtensionRendererLaunchCount[];
+extern const char kStabilityGmsCoreVersion[];
+extern const char kStabilityIncompleteSessionEndCount[];
+extern const char kStabilityLaunchCount[];
+extern const char kStabilityPageLoadCount[];
+extern const char kStabilityRendererCrashCount[];
+extern const char kStabilityRendererFailedLaunchCount[];
+extern const char kStabilityRendererHangCount[];
+extern const char kStabilityRendererLaunchCount[];
+extern const char kStabilitySavedSystemProfile[];
+extern const char kStabilitySavedSystemProfileHash[];
+extern const char kStabilitySessionEndCompleted[];
+extern const char kStabilityStatsBuildTime[];
+extern const char kStabilityStatsVersion[];
+extern const char kStabilitySystemCrashCount[];
+extern const char kStabilityVersionMismatchCount[];
+
+// Preferences for generating metrics at uninstall time.
+extern const char kUninstallLaunchCount[];
+extern const char kUninstallMetricsPageLoadCount[];
+extern const char kUninstallMetricsUptimeSec[];
+
+// For measuring data use for throttling UMA log uploads on cellular.
+extern const char kUkmCellDataUse[];
+extern const char kUmaCellDataUse[];
+extern const char kUserCellDataUse[];
+
+} // namespace prefs
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_PREF_NAMES_H_
diff --git a/components/metrics/metrics_provider.cc b/components/metrics/metrics_provider.cc
new file mode 100644
index 0000000..d5408bb
--- /dev/null
+++ b/components/metrics/metrics_provider.cc
@@ -0,0 +1,75 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_provider.h"
+
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+
+namespace metrics {
+
+MetricsProvider::MetricsProvider() {
+}
+
+MetricsProvider::~MetricsProvider() {
+}
+
+void MetricsProvider::Init() {
+}
+
+void MetricsProvider::AsyncInit(const base::Closure& done_callback) {
+ done_callback.Run();
+}
+
+void MetricsProvider::OnDidCreateMetricsLog() {
+}
+
+void MetricsProvider::OnRecordingEnabled() {
+}
+
+void MetricsProvider::OnRecordingDisabled() {
+}
+
+void MetricsProvider::OnAppEnterBackground() {
+}
+
+bool MetricsProvider::ProvideIndependentMetrics(
+ SystemProfileProto* system_profile_proto,
+ base::HistogramSnapshotManager* snapshot_manager) {
+ return false;
+}
+
+void MetricsProvider::ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) {
+}
+
+bool MetricsProvider::HasPreviousSessionData() {
+ return false;
+}
+
+void MetricsProvider::ProvidePreviousSessionData(
+ ChromeUserMetricsExtension* uma_proto) {
+ ProvideStabilityMetrics(uma_proto->mutable_system_profile());
+}
+
+void MetricsProvider::ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) {
+ ProvideStabilityMetrics(uma_proto->mutable_system_profile());
+}
+
+void MetricsProvider::ProvideStabilityMetrics(
+ SystemProfileProto* system_profile_proto) {
+}
+
+void MetricsProvider::ClearSavedStabilityMetrics() {
+}
+
+void MetricsProvider::RecordHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) {
+}
+
+void MetricsProvider::RecordInitialHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager) {
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_provider.h b/components/metrics/metrics_provider.h
new file mode 100644
index 0000000..591c63f
--- /dev/null
+++ b/components/metrics/metrics_provider.h
@@ -0,0 +1,111 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_METRICS_PROVIDER_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+
+namespace base {
+class HistogramSnapshotManager;
+} // namespace base
+
+namespace metrics {
+
+class ChromeUserMetricsExtension;
+class SystemProfileProto;
+
+// MetricsProvider is an interface allowing different parts of the UMA protos to
+// be filled out by different classes.
+class MetricsProvider {
+ public:
+ MetricsProvider();
+ virtual ~MetricsProvider();
+
+ // Called after initialization of MetricsService and field trials.
+ virtual void Init();
+
+ // Called during service initialization to allow the provider to start any
+ // async initialization tasks. The service will wait for the provider to
+ // call |done_callback| before generating logs for the current session.
+ virtual void AsyncInit(const base::Closure& done_callback);
+
+ // Called when a new MetricsLog is created.
+ virtual void OnDidCreateMetricsLog();
+
+ // Called when metrics recording has been enabled.
+ virtual void OnRecordingEnabled();
+
+ // Called when metrics recording has been disabled.
+ virtual void OnRecordingDisabled();
+
+ // Called when the application is going into background mode, on platforms
+ // where applications may be killed when going into the background (Android,
+ // iOS). Providers that buffer histogram data in memory should persist
+ // histograms in this callback, as the application may be killed without
+ // further notification after this callback.
+ virtual void OnAppEnterBackground();
+
+ // Provides a complete and independent system profile + metrics for uploading.
+ // Any histograms added to the |snapshot_manager| will also be included. A
+ // return value of false indicates there are none. This will be called
+ // repeatedly until it returns false.
+ virtual bool ProvideIndependentMetrics(
+ SystemProfileProto* system_profile_proto,
+ base::HistogramSnapshotManager* snapshot_manager);
+
+ // Provides additional metrics into the system profile.
+ virtual void ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto);
+
+ // Called once at startup to see whether this provider has critical data
+ // to provide about the previous session.
+ // Returning true will trigger ProvidePreviousSessionData on all other
+ // registered metrics providers.
+ // Default implementation always returns false.
+ virtual bool HasPreviousSessionData();
+
+ // Called when building a log about the previous session, so the provider
+ // can provide data about it. Stability metrics can be provided directly
+ // into the stability fields of |uma_proto|'s system profile or by logging
+ // stability histograms via the UMA_STABILITY_HISTOGRAM_ENUMERATION() macro.
+ virtual void ProvidePreviousSessionData(
+ ChromeUserMetricsExtension* uma_proto);
+
+ // Called when building a log about the current session, so the provider
+ // can provide data about it.
+ virtual void ProvideCurrentSessionData(ChromeUserMetricsExtension* uma_proto);
+
+ // Provides additional stability metrics. Stability metrics can be provided
+ // directly into |system_profile_proto|'s stability fields or by logging
+ // stability histograms via the UMA_STABILITY_HISTOGRAM_ENUMERATION() macro.
+ virtual void ProvideStabilityMetrics(
+ SystemProfileProto* system_profile_proto);
+
+ // Called to indicate that saved stability prefs should be cleared, e.g.
+ // because they are from an old version and should not be kept.
+ virtual void ClearSavedStabilityMetrics();
+
+ // Called during regular collection to explicitly load histogram snapshots
+ // using a snapshot manager. PrepareDeltas() will have already been called
+ // and FinishDeltas() will be called later; only PrepareDelta() (singular),
+ // not PrepareDeltas(), should be called here.
+ virtual void RecordHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager);
+
+ // Called during collection of initial metrics to explicitly load histogram
+ // snapshots using a snapshot manager. PrepareDeltas() will have already
+ // been called and FinishDeltas() will be called later; only PrepareDelta()
+ // (singular), not PrepareDeltas(), should be called here.
+ virtual void RecordInitialHistogramSnapshots(
+ base::HistogramSnapshotManager* snapshot_manager);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_PROVIDER_H_
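As the comments above describe, a provider subclasses MetricsProvider and overrides only the hooks it needs; the MetricsService then drives it through the log life cycle. Below is a minimal sketch of such a subclass; the class name and histogram name are hypothetical and not part of this change.

// Hypothetical example; not part of this change.
#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
#include "components/metrics/metrics_provider.h"
#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"

namespace metrics {

class ExampleMetricsProvider : public MetricsProvider {
 public:
  ExampleMetricsProvider() {}
  ~ExampleMetricsProvider() override {}

  // Runs every time an ongoing log is about to be closed.
  void ProvideCurrentSessionData(
      ChromeUserMetricsExtension* uma_proto) override {
    UMA_HISTOGRAM_BOOLEAN("Example.Provider.RanThisLog", true);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ExampleMetricsProvider);
};

}  // namespace metrics

Such a provider would be handed to the service with MetricsService::RegisterMetricsProvider(std::make_unique<ExampleMetricsProvider>()) before the service is started.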
diff --git a/components/metrics/metrics_reporting_default_state.cc b/components/metrics/metrics_reporting_default_state.cc
new file mode 100644
index 0000000..a166600
--- /dev/null
+++ b/components/metrics/metrics_reporting_default_state.cc
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_reporting_default_state.h"
+
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+
+namespace metrics {
+
+void RegisterMetricsReportingStatePrefs(PrefRegistrySimple* registry) {
+ registry->RegisterIntegerPref(prefs::kMetricsDefaultOptIn,
+ EnableMetricsDefault::DEFAULT_UNKNOWN);
+}
+
+void RecordMetricsReportingDefaultState(PrefService* local_state,
+ EnableMetricsDefault default_state) {
+ DCHECK_EQ(GetMetricsReportingDefaultState(local_state),
+ EnableMetricsDefault::DEFAULT_UNKNOWN);
+ local_state->SetInteger(prefs::kMetricsDefaultOptIn, default_state);
+}
+
+void ForceRecordMetricsReportingDefaultState(
+ PrefService* local_state,
+ EnableMetricsDefault default_state) {
+ local_state->SetInteger(prefs::kMetricsDefaultOptIn, default_state);
+}
+
+EnableMetricsDefault GetMetricsReportingDefaultState(PrefService* local_state) {
+ return static_cast<EnableMetricsDefault>(
+ local_state->GetInteger(prefs::kMetricsDefaultOptIn));
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_reporting_default_state.h b/components/metrics/metrics_reporting_default_state.h
new file mode 100644
index 0000000..ad81e99
--- /dev/null
+++ b/components/metrics/metrics_reporting_default_state.h
@@ -0,0 +1,48 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_REPORTING_DEFAULT_STATE_H_
+#define COMPONENTS_METRICS_METRICS_REPORTING_DEFAULT_STATE_H_
+
+class PrefRegistrySimple;
+class PrefService;
+
+namespace metrics {
+
+// Metrics reporting default state. This relates to the state of the enable
+// checkbox shown on first-run. This enum is used to store values in a pref, and
+// shouldn't be renumbered.
+enum EnableMetricsDefault {
+ // We only record the value during first-run. The default of existing
+ // installs is considered unknown.
+ DEFAULT_UNKNOWN,
+ // The first-run checkbox was unchecked by default.
+ OPT_IN,
+ // The first-run checkbox was checked by default.
+ OPT_OUT,
+};
+
+// Register prefs relating to metrics reporting state. Currently only registers
+// a pref for metrics reporting default opt-in state.
+void RegisterMetricsReportingStatePrefs(PrefRegistrySimple* registry);
+
+// Sets whether metrics reporting was opt-in or not. If it was opt-in, then the
+// enable checkbox on first-run was default unchecked. If it was opt-out, then
+// the checkbox was default checked. This should only be set once, and only
+// during first-run.
+void RecordMetricsReportingDefaultState(PrefService* local_state,
+ EnableMetricsDefault default_state);
+
+// Same as above, but does not verify the current state is UNKNOWN.
+void ForceRecordMetricsReportingDefaultState(
+ PrefService* local_state,
+ EnableMetricsDefault default_state);
+
+// Gets information about the default value for the enable metrics reporting
+// checkbox shown during first-run.
+EnableMetricsDefault GetMetricsReportingDefaultState(PrefService* local_state);
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_REPORTING_DEFAULT_STATE_H_
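The functions above are typically exercised around first-run: the pref is registered with the local-state registry, the default is recorded exactly once when the consent UI is shown, and the stored value can be read back later. A hedged sketch follows; the free-function names are illustrative, not part of this change.

// Hypothetical usage; function names are illustrative.
#include "components/metrics/metrics_reporting_default_state.h"
#include "components/prefs/pref_registry_simple.h"
#include "components/prefs/pref_service.h"

void RegisterFirstRunPrefs(PrefRegistrySimple* registry) {
  metrics::RegisterMetricsReportingStatePrefs(registry);
}

// Called once, during first-run, when the consent checkbox is shown.
void RecordFirstRunDefault(PrefService* local_state,
                           bool checkbox_checked_by_default) {
  metrics::RecordMetricsReportingDefaultState(
      local_state, checkbox_checked_by_default ? metrics::OPT_OUT
                                               : metrics::OPT_IN);
}

// Later, e.g. when deciding which consent UI to show in settings.
bool WasInstallOptIn(PrefService* local_state) {
  return metrics::GetMetricsReportingDefaultState(local_state) ==
         metrics::OPT_IN;
}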
diff --git a/components/metrics/metrics_reporting_service.cc b/components/metrics/metrics_reporting_service.cc
new file mode 100644
index 0000000..4ab08d7
--- /dev/null
+++ b/components/metrics/metrics_reporting_service.cc
@@ -0,0 +1,97 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ReportingService specialized to report UMA metrics.
+
+#include "components/metrics/metrics_reporting_service.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/persisted_logs_metrics_impl.h"
+#include "components/metrics/url_constants.h"
+#include "components/prefs/pref_registry_simple.h"
+
+namespace metrics {
+
+namespace {
+
+// If an upload fails, and the transmission was over this byte count, then we
+// will discard the log, and not try to retransmit it. We also don't persist
+// the log to the prefs for transmission during the next chrome session if this
+// limit is exceeded.
+const size_t kUploadLogAvoidRetransmitSize = 100 * 1024;
+
+} // namespace
+
+// static
+void MetricsReportingService::RegisterPrefs(PrefRegistrySimple* registry) {
+ ReportingService::RegisterPrefs(registry);
+ MetricsLogStore::RegisterPrefs(registry);
+}
+
+MetricsReportingService::MetricsReportingService(MetricsServiceClient* client,
+ PrefService* local_state)
+ : ReportingService(client, local_state, kUploadLogAvoidRetransmitSize),
+ metrics_log_store_(local_state, kUploadLogAvoidRetransmitSize) {}
+
+MetricsReportingService::~MetricsReportingService() {}
+
+LogStore* MetricsReportingService::log_store() {
+ return &metrics_log_store_;
+}
+
+std::string MetricsReportingService::GetUploadUrl() const {
+ return client()->GetMetricsServerUrl();
+}
+
+std::string MetricsReportingService::GetInsecureUploadUrl() const {
+ return client()->GetInsecureMetricsServerUrl();
+}
+
+base::StringPiece MetricsReportingService::upload_mime_type() const {
+ return kDefaultMetricsMimeType;
+}
+
+MetricsLogUploader::MetricServiceType MetricsReportingService::service_type()
+ const {
+ return MetricsLogUploader::UMA;
+}
+
+void MetricsReportingService::LogActualUploadInterval(
+ base::TimeDelta interval) {
+ UMA_HISTOGRAM_CUSTOM_COUNTS("UMA.ActualLogUploadInterval",
+ interval.InMinutes(), 1,
+ base::TimeDelta::FromHours(12).InMinutes(), 50);
+}
+
+void MetricsReportingService::LogCellularConstraint(bool upload_canceled) {
+ UMA_HISTOGRAM_BOOLEAN("UMA.LogUpload.Canceled.CellularConstraint",
+ upload_canceled);
+}
+
+void MetricsReportingService::LogResponseOrErrorCode(int response_code,
+ int error_code,
+ bool was_https) {
+ if (was_https) {
+ base::UmaHistogramSparse("UMA.LogUpload.ResponseOrErrorCode",
+ response_code >= 0 ? response_code : error_code);
+ } else {
+ base::UmaHistogramSparse("UMA.LogUpload.ResponseOrErrorCode.HTTP",
+ response_code >= 0 ? response_code : error_code);
+ }
+}
+
+void MetricsReportingService::LogSuccess(size_t log_size) {
+ UMA_HISTOGRAM_COUNTS_10000("UMA.LogSize.OnSuccess", log_size / 1024);
+}
+
+void MetricsReportingService::LogLargeRejection(size_t log_size) {
+ UMA_HISTOGRAM_COUNTS_1M("UMA.Large Rejected Log was Discarded",
+ static_cast<int>(log_size));
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_reporting_service.h b/components/metrics/metrics_reporting_service.h
new file mode 100644
index 0000000..38961b2
--- /dev/null
+++ b/components/metrics/metrics_reporting_service.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines a service that sends metrics logs to a server.
+
+#ifndef COMPONENTS_METRICS_METRICS_REPORTING_SERVICE_H_
+#define COMPONENTS_METRICS_METRICS_REPORTING_SERVICE_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/macros.h"
+#include "components/metrics/metrics_log_store.h"
+#include "components/metrics/reporting_service.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace metrics {
+
+class MetricsServiceClient;
+
+// MetricsReportingService is a concrete implementation of ReportingService for
+// UMA logs. It uses a MetricsLogStore as its LogStore, reports to the UMA
+// endpoint, and logs some histograms with the UMA prefix.
+class MetricsReportingService : public ReportingService {
+ public:
+ // Creates a MetricsReportingService with the given |client| and |local_state|.
+ // Does not take ownership of the parameters; instead it stores a weak
+ // pointer to each. Caller should ensure that the parameters are valid for
+ // the lifetime of this class.
+ MetricsReportingService(MetricsServiceClient* client,
+ PrefService* local_state);
+ ~MetricsReportingService() override;
+
+ MetricsLogStore* metrics_log_store() { return &metrics_log_store_; }
+ const MetricsLogStore* metrics_log_store() const {
+ return &metrics_log_store_;
+ }
+
+ // Registers local state prefs used by this class.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ private:
+ // ReportingService:
+ LogStore* log_store() override;
+ std::string GetUploadUrl() const override;
+ std::string GetInsecureUploadUrl() const override;
+ base::StringPiece upload_mime_type() const override;
+ MetricsLogUploader::MetricServiceType service_type() const override;
+ void LogActualUploadInterval(base::TimeDelta interval) override;
+ void LogCellularConstraint(bool upload_canceled) override;
+ void LogResponseOrErrorCode(int response_code,
+ int error_code,
+ bool was_https) override;
+ void LogSuccess(size_t log_size) override;
+ void LogLargeRejection(size_t log_size) override;
+
+ MetricsLogStore metrics_log_store_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsReportingService);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_REPORTING_SERVICE_H_
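For reference, metrics_service.cc later in this change owns an instance of this class as reporting_service_ and drives it via RegisterPrefs(), Initialize(), EnableReporting() and Start(). A minimal stand-alone sketch of that wiring is below; the helper names are hypothetical assumptions, and in practice MetricsService owns the object directly.

// Hypothetical wiring; MetricsService normally owns this object directly.
#include <memory>

#include "components/metrics/metrics_reporting_service.h"
#include "components/prefs/pref_registry_simple.h"
#include "components/prefs/pref_service.h"

void RegisterReportingPrefs(PrefRegistrySimple* registry) {
  // Must run before |local_state| is constructed.
  metrics::MetricsReportingService::RegisterPrefs(registry);
}

// |client| and |local_state| must outlive the returned service.
std::unique_ptr<metrics::MetricsReportingService> CreateReportingService(
    metrics::MetricsServiceClient* client,
    PrefService* local_state) {
  auto service =
      std::make_unique<metrics::MetricsReportingService>(client, local_state);
  service->Initialize();  // Same call MetricsService makes before scheduling.
  return service;
}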
diff --git a/components/metrics/metrics_rotation_scheduler.cc b/components/metrics/metrics_rotation_scheduler.cc
new file mode 100644
index 0000000..c760b4a
--- /dev/null
+++ b/components/metrics/metrics_rotation_scheduler.cc
@@ -0,0 +1,53 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_rotation_scheduler.h"
+
+#include "base/metrics/histogram_macros.h"
+#include "build/build_config.h"
+
+namespace metrics {
+
+MetricsRotationScheduler::MetricsRotationScheduler(
+ const base::Closure& upload_callback,
+ const base::Callback<base::TimeDelta(void)>& upload_interval_callback)
+ : MetricsScheduler(upload_callback),
+ init_task_complete_(false),
+ waiting_for_init_task_complete_(false),
+ upload_interval_callback_(upload_interval_callback) {}
+
+MetricsRotationScheduler::~MetricsRotationScheduler() {}
+
+void MetricsRotationScheduler::InitTaskComplete() {
+ DCHECK(!init_task_complete_);
+ init_task_complete_ = true;
+ if (waiting_for_init_task_complete_) {
+ waiting_for_init_task_complete_ = false;
+ TriggerTask();
+ } else {
+ LogMetricsInitSequence(INIT_TASK_COMPLETED_FIRST);
+ }
+}
+
+void MetricsRotationScheduler::RotationFinished() {
+ TaskDone(upload_interval_callback_.Run());
+}
+
+void MetricsRotationScheduler::LogMetricsInitSequence(InitSequence sequence) {
+ UMA_HISTOGRAM_ENUMERATION("UMA.InitSequence", sequence,
+ INIT_SEQUENCE_ENUM_SIZE);
+}
+
+void MetricsRotationScheduler::TriggerTask() {
+ // If the timer fired before the init task has completed, don't trigger the
+ // upload yet - wait for the init task to complete and do it then.
+ if (!init_task_complete_) {
+ LogMetricsInitSequence(TIMER_FIRED_FIRST);
+ waiting_for_init_task_complete_ = true;
+ return;
+ }
+ MetricsScheduler::TriggerTask();
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_rotation_scheduler.h b/components/metrics/metrics_rotation_scheduler.h
new file mode 100644
index 0000000..20a2a44
--- /dev/null
+++ b/components/metrics/metrics_rotation_scheduler.h
@@ -0,0 +1,62 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_ROTATION_SCHEDULER_H_
+#define COMPONENTS_METRICS_METRICS_ROTATION_SCHEDULER_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_scheduler.h"
+
+namespace metrics {
+
+// Scheduler task to drive a MetricsService object's uploading.
+class MetricsRotationScheduler : public MetricsScheduler {
+ public:
+ // Creates a MetricsRotationScheduler with the given |rotation_callback| to
+ // call when log rotation should happen, and |interval_callback| to determine
+ // the interval between rotations in steady state.
+ // |rotation_callback| must arrange to call RotationFinished() on completion.
+ MetricsRotationScheduler(
+ const base::Closure& rotation_callback,
+ const base::Callback<base::TimeDelta(void)>& interval_callback);
+ ~MetricsRotationScheduler() override;
+
+ // Callback from MetricsService when the startup init task has completed.
+ void InitTaskComplete();
+
+ // Callback from MetricsService when a triggered rotation finishes.
+ void RotationFinished();
+
+ protected:
+ enum InitSequence {
+ TIMER_FIRED_FIRST,
+ INIT_TASK_COMPLETED_FIRST,
+ INIT_SEQUENCE_ENUM_SIZE,
+ };
+
+ private:
+ // Record the init sequence order histogram.
+ virtual void LogMetricsInitSequence(InitSequence sequence);
+
+ // MetricsScheduler:
+ void TriggerTask() override;
+
+ // Whether the InitTaskComplete() callback has been called.
+ bool init_task_complete_;
+
+ // Whether the initial scheduled upload timer has fired before the init task
+ // has been completed.
+ bool waiting_for_init_task_complete_;
+
+ // Callback function used to get the standard upload time.
+ base::Callback<base::TimeDelta(void)> upload_interval_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsRotationScheduler);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_ROTATION_SCHEDULER_H_
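The constructor takes two callbacks: one that performs the rotation and one that reports the steady-state interval. The sketch below shows how they could be bound, loosely mirroring what MetricsService::InitializeMetricsRecordingState() does; the free functions and the 30-minute interval are illustrative assumptions.

// Hypothetical wiring; the callbacks and interval are illustrative.
#include <memory>

#include "base/bind.h"
#include "base/callback.h"
#include "base/time/time.h"
#include "components/metrics/metrics_rotation_scheduler.h"

namespace {

void RotateLogs() {
  // Cut and stage a log here; the owner must then call RotationFinished()
  // on the scheduler when the work completes.
}

base::TimeDelta GetUploadInterval() {
  return base::TimeDelta::FromMinutes(30);  // Steady-state rotation interval.
}

}  // namespace

std::unique_ptr<metrics::MetricsRotationScheduler> MakeRotationScheduler() {
  auto scheduler = std::make_unique<metrics::MetricsRotationScheduler>(
      base::Bind(&RotateLogs), base::Bind(&GetUploadInterval));
  scheduler->Start();  // First rotation fires after the initial interval.
  return scheduler;
}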
diff --git a/components/metrics/metrics_scheduler.cc b/components/metrics/metrics_scheduler.cc
new file mode 100644
index 0000000..86d1157
--- /dev/null
+++ b/components/metrics/metrics_scheduler.cc
@@ -0,0 +1,64 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_scheduler.h"
+
+#include "build/build_config.h"
+
+namespace metrics {
+
+namespace {
+
+// The delay, in seconds, after startup before sending the first log message.
+#if defined(OS_ANDROID) || defined(OS_IOS)
+// Sessions are more likely to be short on a mobile device, so handle the
+// initial log quickly.
+const int kInitialIntervalSeconds = 15;
+#else
+const int kInitialIntervalSeconds = 60;
+#endif
+
+} // namespace
+
+MetricsScheduler::MetricsScheduler(const base::Closure& task_callback)
+ : task_callback_(task_callback),
+ interval_(base::TimeDelta::FromSeconds(kInitialIntervalSeconds)),
+ running_(false),
+ callback_pending_(false) {}
+
+MetricsScheduler::~MetricsScheduler() {}
+
+void MetricsScheduler::Start() {
+ running_ = true;
+ ScheduleNextTask();
+}
+
+void MetricsScheduler::Stop() {
+ running_ = false;
+ if (timer_.IsRunning())
+ timer_.Stop();
+}
+
+void MetricsScheduler::TaskDone(base::TimeDelta next_interval) {
+ DCHECK(callback_pending_);
+ callback_pending_ = false;
+ interval_ = next_interval;
+ if (running_)
+ ScheduleNextTask();
+}
+
+void MetricsScheduler::TriggerTask() {
+ callback_pending_ = true;
+ task_callback_.Run();
+}
+
+void MetricsScheduler::ScheduleNextTask() {
+ DCHECK(running_);
+ if (timer_.IsRunning() || callback_pending_)
+ return;
+
+ timer_.Start(FROM_HERE, interval_, this, &MetricsScheduler::TriggerTask);
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_scheduler.h b/components/metrics/metrics_scheduler.h
new file mode 100644
index 0000000..fc031fb
--- /dev/null
+++ b/components/metrics/metrics_scheduler.h
@@ -0,0 +1,65 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_SCHEDULER_H_
+#define COMPONENTS_METRICS_METRICS_SCHEDULER_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+
+namespace metrics {
+
+// Scheduler task to drive a MetricsService object's uploading.
+class MetricsScheduler {
+ public:
+ // Creates a MetricsScheduler with the given |task_callback| to call when a
+ // task should run.
+ explicit MetricsScheduler(const base::Closure& task_callback);
+ virtual ~MetricsScheduler();
+
+ // Starts scheduling uploads. This is a no-op if the scheduler is already
+ // running, so it is safe to call more than once.
+ void Start();
+
+ // Stops scheduling uploads.
+ void Stop();
+
+ protected:
+ // Subclasses should arrange for |task_callback| to invoke this once the
+ // triggered task has completed or been cancelled, so that the next call can
+ // be scheduled.
+ void TaskDone(base::TimeDelta next_interval);
+
+ // Called by the Timer when it's time to run the task.
+ virtual void TriggerTask();
+
+ private:
+ // Schedules a future call to TriggerTask if one isn't already pending.
+ void ScheduleNextTask();
+
+ // The method to call when a task should happen.
+ const base::Closure task_callback_;
+
+ // Uses a one-shot timer rather than a repeating one because the task may be
+ // async, and the length of the interval may change.
+ base::OneShotTimer timer_;
+
+ // The interval between being told a task is done and starting the next task.
+ base::TimeDelta interval_;
+
+ // Indicates that the scheduler is running (i.e., that Start has been called
+ // more recently than Stop).
+ bool running_;
+
+ // Indicates that the last triggered task hasn't resolved yet.
+ bool callback_pending_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsScheduler);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_SCHEDULER_H_
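TaskDone() is protected, so concrete schedulers wrap the task callback in a member function that reports completion. A minimal, hypothetical subclass illustrating that pattern is shown below; the fixed 30-minute follow-up interval is an assumption.

// Hypothetical subclass; real schedulers call TaskDone() only after their
// asynchronous work finishes (see MetricsRotationScheduler).
#include "base/bind.h"
#include "base/callback.h"
#include "base/time/time.h"
#include "components/metrics/metrics_scheduler.h"

class FixedIntervalScheduler : public metrics::MetricsScheduler {
 public:
  explicit FixedIntervalScheduler(const base::Closure& task)
      : metrics::MetricsScheduler(
            base::Bind(&FixedIntervalScheduler::RunTask,
                       base::Unretained(this))),
        task_(task) {}

 private:
  void RunTask() {
    task_.Run();
    // Report completion immediately and ask to run again in 30 minutes.
    TaskDone(base::TimeDelta::FromMinutes(30));
  }

  base::Closure task_;
};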
diff --git a/components/metrics/metrics_service.cc b/components/metrics/metrics_service.cc
new file mode 100644
index 0000000..bfa17fe
--- /dev/null
+++ b/components/metrics/metrics_service.cc
@@ -0,0 +1,921 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//------------------------------------------------------------------------------
+// Description of the life cycle of an instance of MetricsService.
+//
+// OVERVIEW
+//
+// A MetricsService instance is typically created at application startup. It is
+// the central controller for the acquisition of log data, and the automatic
+// transmission of that log data to an external server. Its major job is to
+// manage logs, grouping them for transmission, and transmitting them. As part
+// of its grouping, MS finalizes logs by including some just-in-time gathered
+// memory statistics, snapshotting the current stats of numerous histograms,
+// closing the logs, translating to protocol buffer format, and compressing the
+// results for transmission. Transmission includes submitting a compressed log
+// as data in a URL-post, and retransmitting (or retaining at process
+// termination) if the attempted transmission failed. Retention across process
+// terminations is done using the PrefService's facilities. The retained logs
+// (the ones that never got transmitted) are compressed and base64-encoded
+// before being persisted.
+//
+// Logs fall into one of two categories: "initial logs," and "ongoing logs."
+// There is at most one initial log sent for each complete run of Chrome (from
+// startup, to browser shutdown). An initial log is generally transmitted some
+// short time (1 minute?) after startup, and includes stats such as recent crash
+// info, the number and types of plugins, etc. The external server's response
+// to the initial log conceptually tells this MS if it should continue
+// transmitting logs (during this session). The server response can actually be
+// much more detailed, and always includes (at a minimum) how often additional
+// ongoing logs should be sent.
+//
+// After the above initial log, a series of ongoing logs will be transmitted.
+// The first ongoing log actually begins to accumulate information stating when
+// the MS was first constructed. Note that even though the initial log is
+// commonly sent a full minute after startup, the initial log does not include
+// much in the way of user stats. The most common interlog period (delay)
+// is 30 minutes. That time period starts when the first user action causes a
+// logging event. This means that if there is no user action, there may be long
+// periods without any (ongoing) log transmissions. Ongoing logs typically
+// contain very detailed records of user activities (ex: opened tab, closed
+// tab, fetched URL, maximized window, etc.) In addition, just before an
+// ongoing log is closed out, a call is made to gather memory statistics. Those
+// memory statistics are deposited into a histogram, and the log finalization
+// code is then called. In the finalization, a call to a Histogram server
+// acquires a list of all local histograms that have been flagged for upload
+// to the UMA server. The finalization also acquires the most recent number
+// of page loads, along with any counts of renderer or plugin crashes.
+//
+// When the browser shuts down, there will typically be a fragment of an ongoing
+// log that has not yet been transmitted. At shutdown time, that fragment is
+// closed (including snapshotting histograms), and persisted, for potential
+// transmission during a future run of the product.
+//
+// There are two slightly abnormal shutdown conditions. There is a
+// "disconnected scenario," and a "really fast startup and shutdown" scenario.
+// In the "never connected" situation, the user has (during the running of the
+// process) never established an internet connection. As a result, attempts to
+// transmit the initial log have failed, and a lot(?) of data has accumulated in
+// the ongoing log (which didn't yet get closed, because there was never even a
+// contemplation of sending it). There is also a kindred "lost connection"
+// situation, where a loss of connection prevented an ongoing log from being
+// transmitted, and a (still open) log was stuck accumulating a lot(?) of data,
+// while the earlier log retried its transmission. In both of these
+// disconnected situations, two logs need to be, and are, persistently stored
+// for future transmission.
+//
+// The other unusual shutdown condition, termed "really fast startup and
+// shutdown," involves the deliberate user termination of the process before
+// the initial log is even formed or transmitted. In that situation, no logging
+// is done, but the historical crash statistics remain (unlogged) for inclusion
+// in a future run's initial log. (i.e., we don't lose crash stats).
+//
+// With the above overview, we can now describe the state machine's various
+// states, based on the State enum specified in the state_ member. Those states
+// are:
+//
+// INITIALIZED, // Constructor was called.
+// INIT_TASK_SCHEDULED, // Waiting for deferred init tasks to finish.
+// INIT_TASK_DONE, // Waiting for timer to send initial log.
+// SENDING_LOGS, // Sending logs and creating new ones when we run out.
+//
+// In more detail, we have:
+//
+// INITIALIZED, // Constructor was called.
+// The MS has been constructed, but has taken no actions to compose the
+// initial log.
+//
+// INIT_TASK_SCHEDULED, // Waiting for deferred init tasks to finish.
+// Typically about 30 seconds after startup, a task is sent to a second thread
+// (the file thread) to perform deferred (lower priority and slower)
+// initialization steps such as getting the list of plugins. That task will
+// (when complete) make an async callback (via a Task) to indicate the
+// completion.
+//
+// INIT_TASK_DONE, // Waiting for timer to send initial log.
+// The callback has arrived, and it is now possible for an initial log to be
+// created. This callback typically arrives back less than one second after
+// the deferred init task is dispatched.
+//
+//    SENDING_LOGS,             // Sending logs and creating new ones when we run out.
+// Logs from previous sessions have been loaded, and initial logs have been
+// created (an optional stability log and the first metrics log). We will
+// send all of these logs, and when run out, we will start cutting new logs
+// to send. We will also cut a new log if we expect a shutdown.
+//
+// The progression through the above states is simple and sequential.
+// States proceed from INITIALIZED to SENDING_LOGS, and remain in the latter until
+// shutdown.
+//
+// Also note that whenever we successfully send a log, we mirror the list
+// of logs into the PrefService. This ensures that IF we crash, we won't start
+// up and retransmit our old logs again.
+//
+// Due to race conditions, it is always possible that a log file could be sent
+// twice. For example, if a log file is sent, but not yet acknowledged by
+// the external server, and the user shuts down, then a copy of the log may be
+// saved for re-transmission. These duplicates could be filtered out server
+// side, but are not expected to be a significant problem.
+//
+//
+//------------------------------------------------------------------------------
+
+#include "components/metrics/metrics_service.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/string_piece.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "components/metrics/environment_recorder.h"
+#include "components/metrics/field_trials_provider.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/metrics_log_manager.h"
+#include "components/metrics/metrics_log_uploader.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_rotation_scheduler.h"
+#include "components/metrics/metrics_service_client.h"
+#include "components/metrics/metrics_state_manager.h"
+#include "components/metrics/persistent_system_profile.h"
+#include "components/metrics/stability_metrics_provider.h"
+#include "components/metrics/url_constants.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+#include "components/variations/entropy_provider.h"
+
+namespace metrics {
+
+namespace {
+
+// The delay, in seconds, after starting recording before doing expensive
+// initialization work.
+#if defined(OS_ANDROID) || defined(OS_IOS)
+// On mobile devices, a significant portion of sessions last less than a minute.
+// Use a shorter timer on these platforms to avoid losing data.
+// TODO(dfalcantara): To avoid delaying startup, tighten up initialization so
+// that it occurs after the user gets their initial page.
+const int kInitializationDelaySeconds = 5;
+#else
+const int kInitializationDelaySeconds = 30;
+#endif
+
+// The browser last live timestamp is updated every 15 minutes.
+const int kUpdateAliveTimestampSeconds = 15 * 60;
+
+#if defined(OS_ANDROID) || defined(OS_IOS)
+void MarkAppCleanShutdownAndCommit(CleanExitBeacon* clean_exit_beacon,
+ PrefService* local_state) {
+ clean_exit_beacon->WriteBeaconValue(true);
+ ExecutionPhaseManager(local_state).OnAppEnterBackground();
+ // Start writing right away (write happens on a different thread).
+ local_state->CommitPendingWrite();
+}
+#endif // defined(OS_ANDROID) || defined(OS_IOS)
+
+} // namespace
+
+// static
+MetricsService::ShutdownCleanliness MetricsService::clean_shutdown_status_ =
+ MetricsService::CLEANLY_SHUTDOWN;
+
+// static
+void MetricsService::RegisterPrefs(PrefRegistrySimple* registry) {
+ CleanExitBeacon::RegisterPrefs(registry);
+ MetricsStateManager::RegisterPrefs(registry);
+ MetricsLog::RegisterPrefs(registry);
+ StabilityMetricsProvider::RegisterPrefs(registry);
+ ExecutionPhaseManager::RegisterPrefs(registry);
+ MetricsReportingService::RegisterPrefs(registry);
+
+ registry->RegisterIntegerPref(prefs::kMetricsSessionID, -1);
+
+ registry->RegisterInt64Pref(prefs::kUninstallLaunchCount, 0);
+ registry->RegisterInt64Pref(prefs::kUninstallMetricsUptimeSec, 0);
+}
+
+MetricsService::MetricsService(MetricsStateManager* state_manager,
+ MetricsServiceClient* client,
+ PrefService* local_state)
+ : reporting_service_(client, local_state),
+ histogram_snapshot_manager_(this),
+ state_manager_(state_manager),
+ client_(client),
+ local_state_(local_state),
+ recording_state_(UNSET),
+ test_mode_active_(false),
+ state_(INITIALIZED),
+ idle_since_last_transmission_(false),
+ session_id_(-1),
+ self_ptr_factory_(this) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(state_manager_);
+ DCHECK(client_);
+ DCHECK(local_state_);
+
+ RegisterMetricsProvider(
+ std::make_unique<StabilityMetricsProvider>(local_state_));
+
+ RegisterMetricsProvider(state_manager_->GetProvider());
+
+ RegisterMetricsProvider(std::make_unique<variations::FieldTrialsProvider>(
+ &synthetic_trial_registry_, base::StringPiece()));
+}
+
+MetricsService::~MetricsService() {
+ DisableRecording();
+}
+
+void MetricsService::InitializeMetricsRecordingState() {
+ reporting_service_.Initialize();
+ InitializeMetricsState();
+
+ base::Closure upload_callback =
+ base::Bind(&MetricsService::StartScheduledUpload,
+ self_ptr_factory_.GetWeakPtr());
+
+ rotation_scheduler_.reset(new MetricsRotationScheduler(
+ upload_callback,
+ // MetricsServiceClient outlives MetricsService, and
+ // MetricsRotationScheduler is tied to the lifetime of |this|.
+ base::Bind(&MetricsServiceClient::GetStandardUploadInterval,
+ base::Unretained(client_))));
+
+ // Init() has to be called after LogCrash() in order for LogCrash() to work.
+ delegating_provider_.Init();
+}
+
+void MetricsService::Start() {
+ HandleIdleSinceLastTransmission(false);
+ EnableRecording();
+ EnableReporting();
+}
+
+void MetricsService::StartRecordingForTests() {
+ test_mode_active_ = true;
+ EnableRecording();
+ DisableReporting();
+}
+
+void MetricsService::StartUpdatingLastLiveTimestamp() {
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&MetricsService::UpdateLastLiveTimestampTask,
+ self_ptr_factory_.GetWeakPtr()),
+ base::TimeDelta::FromSeconds(kUpdateAliveTimestampSeconds));
+}
+
+void MetricsService::Stop() {
+ HandleIdleSinceLastTransmission(false);
+ DisableReporting();
+ DisableRecording();
+}
+
+void MetricsService::EnableReporting() {
+ if (reporting_service_.reporting_active())
+ return;
+ reporting_service_.EnableReporting();
+ StartSchedulerIfNecessary();
+}
+
+void MetricsService::DisableReporting() {
+ reporting_service_.DisableReporting();
+}
+
+std::string MetricsService::GetClientId() {
+ return state_manager_->client_id();
+}
+
+int64_t MetricsService::GetInstallDate() {
+ return state_manager_->GetInstallDate();
+}
+
+bool MetricsService::WasLastShutdownClean() const {
+ return state_manager_->clean_exit_beacon()->exited_cleanly();
+}
+
+void MetricsService::EnableRecording() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (recording_state_ == ACTIVE)
+ return;
+ recording_state_ = ACTIVE;
+
+ state_manager_->ForceClientIdCreation();
+ client_->SetMetricsClientId(state_manager_->client_id());
+
+ SystemProfileProto system_profile;
+ MetricsLog::RecordCoreSystemProfile(client_, &system_profile);
+ GlobalPersistentSystemProfile::GetInstance()->SetSystemProfile(
+ system_profile, /*complete=*/false);
+
+ if (!log_manager_.current_log())
+ OpenNewLog();
+
+ delegating_provider_.OnRecordingEnabled();
+
+ base::RemoveActionCallback(action_callback_);
+ action_callback_ = base::Bind(&MetricsService::OnUserAction,
+ base::Unretained(this));
+ base::AddActionCallback(action_callback_);
+}
+
+void MetricsService::DisableRecording() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (recording_state_ == INACTIVE)
+ return;
+ recording_state_ = INACTIVE;
+
+ base::RemoveActionCallback(action_callback_);
+
+ delegating_provider_.OnRecordingDisabled();
+
+ PushPendingLogsToPersistentStorage();
+}
+
+bool MetricsService::recording_active() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return recording_state_ == ACTIVE;
+}
+
+bool MetricsService::reporting_active() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return reporting_service_.reporting_active();
+}
+
+bool MetricsService::has_unsent_logs() const {
+ return reporting_service_.metrics_log_store()->has_unsent_logs();
+}
+
+void MetricsService::RecordDelta(const base::HistogramBase& histogram,
+ const base::HistogramSamples& snapshot) {
+ log_manager_.current_log()->RecordHistogramDelta(histogram.histogram_name(),
+ snapshot);
+}
+
+void MetricsService::HandleIdleSinceLastTransmission(bool in_idle) {
+ // If there wasn't a lot of action, maybe the computer was asleep, in which
+ // case, the log transmissions should have stopped. Here we start them up
+ // again.
+ if (!in_idle && idle_since_last_transmission_)
+ StartSchedulerIfNecessary();
+ idle_since_last_transmission_ = in_idle;
+}
+
+void MetricsService::OnApplicationNotIdle() {
+ if (recording_state_ == ACTIVE)
+ HandleIdleSinceLastTransmission(false);
+}
+
+void MetricsService::RecordStartOfSessionEnd() {
+ LogCleanShutdown(false);
+}
+
+void MetricsService::RecordCompletedSessionEnd() {
+ LogCleanShutdown(true);
+}
+
+#if defined(OS_ANDROID) || defined(OS_IOS)
+void MetricsService::OnAppEnterBackground() {
+ rotation_scheduler_->Stop();
+ reporting_service_.Stop();
+
+ MarkAppCleanShutdownAndCommit(state_manager_->clean_exit_beacon(),
+ local_state_);
+
+ // Give providers a chance to persist histograms as part of being
+ // backgrounded.
+ delegating_provider_.OnAppEnterBackground();
+
+ // At this point, there's no way of knowing when the process will be
+  // killed, so this has to be treated similarly to a shutdown, closing and
+  // persisting all logs. Unlike a shutdown, the state is primed to be ready
+ // to continue logging and uploading if the process does return.
+ if (recording_active() && state_ >= SENDING_LOGS) {
+ PushPendingLogsToPersistentStorage();
+ // Persisting logs closes the current log, so start recording a new log
+ // immediately to capture any background work that might be done before the
+ // process is killed.
+ OpenNewLog();
+ }
+}
+
+void MetricsService::OnAppEnterForeground() {
+ state_manager_->clean_exit_beacon()->WriteBeaconValue(false);
+ ExecutionPhaseManager(local_state_).OnAppEnterForeground();
+ StartSchedulerIfNecessary();
+}
+#else
+void MetricsService::LogNeedForCleanShutdown() {
+ state_manager_->clean_exit_beacon()->WriteBeaconValue(false);
+ // Redundant setting to be sure we call for a clean shutdown.
+ clean_shutdown_status_ = NEED_TO_SHUTDOWN;
+}
+#endif // defined(OS_ANDROID) || defined(OS_IOS)
+
+// static
+void MetricsService::SetExecutionPhase(ExecutionPhase execution_phase,
+ PrefService* local_state) {
+ ExecutionPhaseManager(local_state).SetExecutionPhase(execution_phase);
+}
+
+void MetricsService::RecordBreakpadRegistration(bool success) {
+ StabilityMetricsProvider(local_state_).RecordBreakpadRegistration(success);
+}
+
+void MetricsService::RecordBreakpadHasDebugger(bool has_debugger) {
+ StabilityMetricsProvider(local_state_)
+ .RecordBreakpadHasDebugger(has_debugger);
+}
+
+void MetricsService::ClearSavedStabilityMetrics() {
+ delegating_provider_.ClearSavedStabilityMetrics();
+}
+
+void MetricsService::PushExternalLog(const std::string& log) {
+ log_store()->StoreLog(log, MetricsLog::ONGOING_LOG);
+}
+
+void MetricsService::UpdateMetricsUsagePrefs(const std::string& service_name,
+ int message_size,
+ bool is_cellular) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ reporting_service_.UpdateMetricsUsagePrefs(service_name, message_size,
+ is_cellular);
+}
+
+//------------------------------------------------------------------------------
+// private methods
+//------------------------------------------------------------------------------
+
+
+//------------------------------------------------------------------------------
+// Initialization methods
+
+void MetricsService::InitializeMetricsState() {
+ const int64_t buildtime = MetricsLog::GetBuildTime();
+ const std::string version = client_->GetVersionString();
+
+ bool version_changed = false;
+ EnvironmentRecorder recorder(local_state_);
+ int64_t previous_buildtime = recorder.GetLastBuildtime();
+ std::string previous_version = recorder.GetLastVersion();
+ if (previous_buildtime != buildtime || previous_version != version) {
+ recorder.SetBuildtimeAndVersion(buildtime, version);
+ version_changed = true;
+ }
+
+ session_id_ = local_state_->GetInteger(prefs::kMetricsSessionID);
+
+ StabilityMetricsProvider provider(local_state_);
+ if (!state_manager_->clean_exit_beacon()->exited_cleanly()) {
+ provider.LogCrash(
+ state_manager_->clean_exit_beacon()->browser_last_live_timestamp());
+ // Reset flag, and wait until we call LogNeedForCleanShutdown() before
+ // monitoring.
+ state_manager_->clean_exit_beacon()->WriteBeaconValue(true);
+ ExecutionPhaseManager manager(local_state_);
+ base::UmaHistogramSparse("Chrome.Browser.CrashedExecutionPhase",
+ static_cast<int>(manager.GetExecutionPhase()));
+ manager.SetExecutionPhase(ExecutionPhase::UNINITIALIZED_PHASE);
+ }
+
+ // HasPreviousSessionData is called first to ensure it is never bypassed.
+ const bool is_initial_stability_log_required =
+ delegating_provider_.HasPreviousSessionData() ||
+ !state_manager_->clean_exit_beacon()->exited_cleanly();
+ bool has_initial_stability_log = false;
+ if (is_initial_stability_log_required) {
+ // If the previous session didn't exit cleanly, or if any provider
+ // explicitly requests it, prepare an initial stability log -
+ // provided UMA is enabled.
+ if (state_manager_->IsMetricsReportingEnabled()) {
+ has_initial_stability_log = PrepareInitialStabilityLog(previous_version);
+ if (!has_initial_stability_log)
+ provider.LogStabilityLogDeferred();
+ }
+ }
+
+ // If the version changed, but no initial stability log was generated, clear
+ // the stability stats from the previous version (so that they don't get
+ // attributed to the current version). This could otherwise happen due to a
+ // number of different edge cases, such as if the last version crashed before
+ // it could save off a system profile or if UMA reporting is disabled (which
+ // normally results in stats being accumulated).
+ if (version_changed && !has_initial_stability_log) {
+ ClearSavedStabilityMetrics();
+ provider.LogStabilityDataDiscarded();
+ }
+
+ // If the version changed, the system profile is obsolete and needs to be
+ // cleared. This is to avoid the stability data misattribution that could
+ // occur if the current version crashed before saving its own system profile.
+ // Note however this clearing occurs only after preparing the initial
+ // stability log, an operation that requires the previous version's system
+ // profile. At this point, stability metrics pertaining to the previous
+ // version have been cleared.
+ if (version_changed)
+ recorder.ClearEnvironmentFromPrefs();
+
+ // Update session ID.
+ ++session_id_;
+ local_state_->SetInteger(prefs::kMetricsSessionID, session_id_);
+
+ // Notify stability metrics providers about the launch.
+ provider.LogLaunch();
+ SetExecutionPhase(ExecutionPhase::START_METRICS_RECORDING, local_state_);
+ provider.CheckLastSessionEndCompleted();
+
+ // Call GetUptimes() for the first time, thus allowing all later calls
+ // to record incremental uptimes accurately.
+ base::TimeDelta ignored_uptime_parameter;
+ base::TimeDelta startup_uptime;
+ GetUptimes(local_state_, &startup_uptime, &ignored_uptime_parameter);
+ DCHECK_EQ(0, startup_uptime.InMicroseconds());
+
+ // Bookkeeping for the uninstall metrics.
+ IncrementLongPrefsValue(prefs::kUninstallLaunchCount);
+}
+
+void MetricsService::OnUserAction(const std::string& action) {
+ log_manager_.current_log()->RecordUserAction(action);
+ HandleIdleSinceLastTransmission(false);
+}
+
+void MetricsService::FinishedInitTask() {
+ DCHECK_EQ(INIT_TASK_SCHEDULED, state_);
+ state_ = INIT_TASK_DONE;
+
+ // Create the initial log.
+ if (!initial_metrics_log_) {
+ initial_metrics_log_ = CreateLog(MetricsLog::ONGOING_LOG);
+ delegating_provider_.OnDidCreateMetricsLog();
+ }
+
+ rotation_scheduler_->InitTaskComplete();
+}
+
+void MetricsService::GetUptimes(PrefService* pref,
+ base::TimeDelta* incremental_uptime,
+ base::TimeDelta* uptime) {
+ base::TimeTicks now = base::TimeTicks::Now();
+ // If this is the first call, init |first_updated_time_| and
+ // |last_updated_time_|.
+ if (last_updated_time_.is_null()) {
+ first_updated_time_ = now;
+ last_updated_time_ = now;
+ }
+ *incremental_uptime = now - last_updated_time_;
+ *uptime = now - first_updated_time_;
+ last_updated_time_ = now;
+
+ const int64_t incremental_time_secs = incremental_uptime->InSeconds();
+ if (incremental_time_secs > 0) {
+ int64_t metrics_uptime = pref->GetInt64(prefs::kUninstallMetricsUptimeSec);
+ metrics_uptime += incremental_time_secs;
+ pref->SetInt64(prefs::kUninstallMetricsUptimeSec, metrics_uptime);
+ }
+}
+
+//------------------------------------------------------------------------------
+// Recording control methods
+
+void MetricsService::OpenNewLog() {
+ DCHECK(!log_manager_.current_log());
+
+ log_manager_.BeginLoggingWithLog(CreateLog(MetricsLog::ONGOING_LOG));
+ delegating_provider_.OnDidCreateMetricsLog();
+ if (state_ == INITIALIZED) {
+ // We only need to schedule that run once.
+ state_ = INIT_TASK_SCHEDULED;
+
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&MetricsService::StartInitTask,
+ self_ptr_factory_.GetWeakPtr()),
+ base::TimeDelta::FromSeconds(kInitializationDelaySeconds));
+
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&MetricsService::PrepareProviderMetricsTask,
+ self_ptr_factory_.GetWeakPtr()),
+ base::TimeDelta::FromSeconds(2 * kInitializationDelaySeconds));
+ }
+}
+
+void MetricsService::StartInitTask() {
+ delegating_provider_.AsyncInit(base::Bind(&MetricsService::FinishedInitTask,
+ self_ptr_factory_.GetWeakPtr()));
+}
+
+void MetricsService::CloseCurrentLog() {
+ if (!log_manager_.current_log())
+ return;
+
+ // If a persistent allocator is in use, update its internal histograms (such
+ // as how much memory is being used) before reporting.
+ base::PersistentHistogramAllocator* allocator =
+ base::GlobalHistogramAllocator::Get();
+ if (allocator)
+ allocator->UpdateTrackingHistograms();
+
+ // Put incremental data (histogram deltas, and realtime stats deltas) at the
+ // end of all log transmissions (initial log handles this separately).
+ // RecordIncrementalStabilityElements only exists on the derived
+ // MetricsLog class.
+ MetricsLog* current_log = log_manager_.current_log();
+ DCHECK(current_log);
+ RecordCurrentEnvironment(current_log);
+ base::TimeDelta incremental_uptime;
+ base::TimeDelta uptime;
+ GetUptimes(local_state_, &incremental_uptime, &uptime);
+ current_log->RecordCurrentSessionData(&delegating_provider_,
+ incremental_uptime, uptime);
+ RecordCurrentHistograms();
+ current_log->TruncateEvents();
+ DVLOG(1) << "Generated an ongoing log.";
+ log_manager_.FinishCurrentLog(log_store());
+}
+
+void MetricsService::PushPendingLogsToPersistentStorage() {
+ if (state_ < SENDING_LOGS)
+ return; // We didn't and still don't have time to get plugin list etc.
+
+ CloseCurrentLog();
+ log_store()->PersistUnsentLogs();
+}
+
+//------------------------------------------------------------------------------
+// Transmission of logs methods
+
+void MetricsService::StartSchedulerIfNecessary() {
+ // Never schedule cutting or uploading of logs in test mode.
+ if (test_mode_active_)
+ return;
+
+ // Even if reporting is disabled, the scheduler is needed to trigger the
+ // creation of the initial log, which must be done in order for any logs to be
+ // persisted on shutdown or backgrounding.
+ if (recording_active() &&
+ (reporting_active() || state_ < SENDING_LOGS)) {
+ rotation_scheduler_->Start();
+ reporting_service_.Start();
+ }
+}
+
+void MetricsService::StartScheduledUpload() {
+ DVLOG(1) << "StartScheduledUpload";
+ DCHECK(state_ >= INIT_TASK_DONE);
+
+ // If we're getting no notifications, then the log won't have much in it, and
+ // it's possible the computer is about to go to sleep, so don't upload and
+ // stop the scheduler.
+ // If recording has been turned off, the scheduler doesn't need to run.
+ // If reporting is off, proceed if the initial log hasn't been created, since
+ // that has to happen in order for logs to be cut and stored when persisting.
+ // TODO(stuartmorgan): Call Stop() on the scheduler when reporting and/or
+ // recording are turned off instead of letting it fire and then aborting.
+ if (idle_since_last_transmission_ ||
+ !recording_active() ||
+ (!reporting_active() && state_ >= SENDING_LOGS)) {
+ rotation_scheduler_->Stop();
+ rotation_scheduler_->RotationFinished();
+ return;
+ }
+
+ // If there are unsent logs, send the next one. If not, start the asynchronous
+ // process of finalizing the current log for upload.
+ if (state_ == SENDING_LOGS && has_unsent_logs()) {
+ reporting_service_.Start();
+ rotation_scheduler_->RotationFinished();
+ } else {
+ // There are no logs left to send, so start creating a new one.
+ client_->CollectFinalMetricsForLog(
+ base::Bind(&MetricsService::OnFinalLogInfoCollectionDone,
+ self_ptr_factory_.GetWeakPtr()));
+ }
+}
+
+void MetricsService::OnFinalLogInfoCollectionDone() {
+ DVLOG(1) << "OnFinalLogInfoCollectionDone";
+
+ // Abort if metrics were turned off during the final info gathering.
+ if (!recording_active()) {
+ rotation_scheduler_->Stop();
+ rotation_scheduler_->RotationFinished();
+ return;
+ }
+
+ if (state_ == INIT_TASK_DONE) {
+ PrepareInitialMetricsLog();
+ } else {
+ DCHECK_EQ(SENDING_LOGS, state_);
+ CloseCurrentLog();
+ OpenNewLog();
+ }
+ reporting_service_.Start();
+ rotation_scheduler_->RotationFinished();
+ HandleIdleSinceLastTransmission(true);
+}
+
+bool MetricsService::PrepareInitialStabilityLog(
+ const std::string& prefs_previous_version) {
+ DCHECK_EQ(INITIALIZED, state_);
+
+ std::unique_ptr<MetricsLog> initial_stability_log(
+ CreateLog(MetricsLog::INITIAL_STABILITY_LOG));
+
+ // Do not call OnDidCreateMetricsLog here because the stability
+ // log describes stats from the _previous_ session.
+ std::string system_profile_app_version;
+ if (!initial_stability_log->LoadSavedEnvironmentFromPrefs(
+ local_state_, &system_profile_app_version)) {
+ return false;
+ }
+ if (system_profile_app_version != prefs_previous_version)
+ StabilityMetricsProvider(local_state_).LogStabilityVersionMismatch();
+
+ log_manager_.PauseCurrentLog();
+ log_manager_.BeginLoggingWithLog(std::move(initial_stability_log));
+
+ // Note: Some stability providers may record stability stats via histograms,
+ // so this call has to be after BeginLoggingWithLog().
+ log_manager_.current_log()->RecordPreviousSessionData(&delegating_provider_);
+ RecordCurrentStabilityHistograms();
+
+ DVLOG(1) << "Generated an stability log.";
+ log_manager_.FinishCurrentLog(log_store());
+ log_manager_.ResumePausedLog();
+
+ // Store unsent logs, including the stability log that was just saved, so
+ // that they're not lost in case of a crash before upload time.
+ log_store()->PersistUnsentLogs();
+
+ return true;
+}
+
+void MetricsService::PrepareInitialMetricsLog() {
+ DCHECK_EQ(INIT_TASK_DONE, state_);
+
+ RecordCurrentEnvironment(initial_metrics_log_.get());
+ base::TimeDelta incremental_uptime;
+ base::TimeDelta uptime;
+ GetUptimes(local_state_, &incremental_uptime, &uptime);
+
+ // Histograms only get written to the current log, so make the new log current
+ // before writing them.
+ log_manager_.PauseCurrentLog();
+ log_manager_.BeginLoggingWithLog(std::move(initial_metrics_log_));
+
+ // Note: Some stability providers may record stability stats via histograms,
+ // so this call has to be after BeginLoggingWithLog().
+ log_manager_.current_log()->RecordCurrentSessionData(
+ &delegating_provider_, base::TimeDelta(), base::TimeDelta());
+ RecordCurrentHistograms();
+
+ DVLOG(1) << "Generated an initial log.";
+ log_manager_.FinishCurrentLog(log_store());
+ log_manager_.ResumePausedLog();
+
+ // Store unsent logs, including the initial log that was just saved, so
+ // that they're not lost in case of a crash before upload time.
+ log_store()->PersistUnsentLogs();
+
+ state_ = SENDING_LOGS;
+}
+
+void MetricsService::IncrementLongPrefsValue(const char* path) {
+ int64_t value = local_state_->GetInt64(path);
+ local_state_->SetInt64(path, value + 1);
+}
+
+bool MetricsService::UmaMetricsProperlyShutdown() {
+ CHECK(clean_shutdown_status_ == CLEANLY_SHUTDOWN ||
+ clean_shutdown_status_ == NEED_TO_SHUTDOWN);
+ return clean_shutdown_status_ == CLEANLY_SHUTDOWN;
+}
+
+void MetricsService::RegisterMetricsProvider(
+ std::unique_ptr<MetricsProvider> provider) {
+ DCHECK_EQ(INITIALIZED, state_);
+ delegating_provider_.RegisterMetricsProvider(std::move(provider));
+}
+
+void MetricsService::CheckForClonedInstall() {
+ state_manager_->CheckForClonedInstall();
+}
+
+std::unique_ptr<MetricsLog> MetricsService::CreateLog(
+ MetricsLog::LogType log_type) {
+ return std::make_unique<MetricsLog>(state_manager_->client_id(), session_id_,
+ log_type, client_);
+}
+
+std::string MetricsService::RecordCurrentEnvironmentHelper(
+ MetricsLog* log,
+ PrefService* local_state,
+ DelegatingProvider* delegating_provider) {
+ const SystemProfileProto& system_profile =
+ log->RecordEnvironment(delegating_provider);
+ EnvironmentRecorder recorder(local_state);
+ return recorder.SerializeAndRecordEnvironmentToPrefs(system_profile);
+}
+
+void MetricsService::RecordCurrentEnvironment(MetricsLog* log) {
+ DCHECK(client_);
+ std::string serialized_proto =
+ RecordCurrentEnvironmentHelper(log, local_state_, &delegating_provider_);
+ GlobalPersistentSystemProfile::GetInstance()->SetSystemProfile(
+ serialized_proto, /*complete=*/true);
+ client_->OnEnvironmentUpdate(&serialized_proto);
+}
+
+void MetricsService::RecordCurrentHistograms() {
+ DCHECK(log_manager_.current_log());
+
+ // "true" indicates that StatisticsRecorder should include histograms held in
+ // persistent storage.
+ base::StatisticsRecorder::PrepareDeltas(
+ true, base::Histogram::kNoFlags,
+ base::Histogram::kUmaTargetedHistogramFlag, &histogram_snapshot_manager_);
+ delegating_provider_.RecordHistogramSnapshots(&histogram_snapshot_manager_);
+}
+
+void MetricsService::RecordCurrentStabilityHistograms() {
+ DCHECK(log_manager_.current_log());
+ // "true" indicates that StatisticsRecorder should include histograms held in
+ // persistent storage.
+ base::StatisticsRecorder::PrepareDeltas(
+ true, base::Histogram::kNoFlags,
+ base::Histogram::kUmaStabilityHistogramFlag,
+ &histogram_snapshot_manager_);
+ delegating_provider_.RecordInitialHistogramSnapshots(
+ &histogram_snapshot_manager_);
+}
+
+bool MetricsService::PrepareProviderMetricsLog() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  // Create a new log. This will have some default values injected into it but
+ // those will be overwritten when an embedded profile is extracted.
+ std::unique_ptr<MetricsLog> log = CreateLog(MetricsLog::INDEPENDENT_LOG);
+
+ for (auto& provider : delegating_provider_.GetProviders()) {
+ if (log->LoadIndependentMetrics(provider.get())) {
+ log_manager_.PauseCurrentLog();
+ log_manager_.BeginLoggingWithLog(std::move(log));
+ log_manager_.FinishCurrentLog(log_store());
+ log_manager_.ResumePausedLog();
+ return true;
+ }
+ }
+ return false;
+}
+
+void MetricsService::PrepareProviderMetricsTask() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ bool found = PrepareProviderMetricsLog();
+ base::TimeDelta next_check = found ? base::TimeDelta::FromSeconds(5)
+ : base::TimeDelta::FromMinutes(15);
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&MetricsService::PrepareProviderMetricsTask,
+ self_ptr_factory_.GetWeakPtr()),
+ next_check);
+}
+
+void MetricsService::LogCleanShutdown(bool end_completed) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ // Redundant setting to assure that we always reset this value at shutdown
+ // (and that we don't use some alternate path, and not call LogCleanShutdown).
+ clean_shutdown_status_ = CLEANLY_SHUTDOWN;
+ client_->OnLogCleanShutdown();
+ state_manager_->clean_exit_beacon()->WriteBeaconValue(true);
+ SetExecutionPhase(ExecutionPhase::SHUTDOWN_COMPLETE, local_state_);
+ StabilityMetricsProvider(local_state_).MarkSessionEndCompleted(end_completed);
+}
+
+void MetricsService::UpdateLastLiveTimestampTask() {
+ state_manager_->clean_exit_beacon()->UpdateLastLiveTimestamp();
+
+  // Schedule the next update.
+ StartUpdatingLastLiveTimestamp();
+}
+
+} // namespace metrics
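Putting the pieces above together, an embedder's startup path roughly follows the life-cycle comment at the top of this file: construct the service, register any extra providers, initialize recording state, then call Start() once reporting consent is known. The sketch below is a hedged illustration; the helper and parameter names are hypothetical, and creation of the state manager, client, and local state is the embedder's job.

// Hypothetical startup helper; parameter setup is the embedder's job.
#include <memory>

#include "components/metrics/metrics_service.h"
#include "components/metrics/metrics_service_client.h"
#include "components/metrics/metrics_state_manager.h"
#include "components/prefs/pref_service.h"

std::unique_ptr<metrics::MetricsService> StartMetricsService(
    metrics::MetricsStateManager* state_manager,
    metrics::MetricsServiceClient* client,
    PrefService* local_state,
    bool reporting_consented) {
  auto service = std::make_unique<metrics::MetricsService>(
      state_manager, client, local_state);
  // Extra providers would be registered here, while the service is still
  // in its INITIALIZED state.
  service->InitializeMetricsRecordingState();
  if (reporting_consented)
    service->Start();  // Enables recording and uploading.
  return service;
}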
diff --git a/components/metrics/metrics_service.h b/components/metrics/metrics_service.h
new file mode 100644
index 0000000..6471314
--- /dev/null
+++ b/components/metrics/metrics_service.h
@@ -0,0 +1,402 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines a service that collects information about the user
+// experience in order to help improve future versions of the app.
+
+#ifndef COMPONENTS_METRICS_METRICS_SERVICE_H_
+#define COMPONENTS_METRICS_METRICS_SERVICE_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_flattener.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/metrics/user_metrics.h"
+#include "base/sequence_checker.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "components/metrics/clean_exit_beacon.h"
+#include "components/metrics/delegating_provider.h"
+#include "components/metrics/execution_phase.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/metrics_log_manager.h"
+#include "components/metrics/metrics_log_store.h"
+#include "components/metrics/metrics_provider.h"
+#include "components/metrics/metrics_reporting_service.h"
+#include "components/metrics/net/network_metrics_provider.h"
+#include "components/variations/synthetic_trial_registry.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace base {
+class HistogramSamples;
+class PrefService;
+}
+
+namespace metrics {
+
+class MetricsRotationScheduler;
+class MetricsServiceClient;
+class MetricsStateManager;
+
+// See metrics_service.cc for a detailed description.
+class MetricsService : public base::HistogramFlattener {
+ public:
+ // Creates the MetricsService with the given |state_manager|, |client|, and
+  // |local_state|. Does not take ownership of the parameters; instead stores
+  // a weak pointer to each. The caller should ensure that the parameters are
+  // valid for the lifetime of this class.
+ MetricsService(MetricsStateManager* state_manager,
+ MetricsServiceClient* client,
+ PrefService* local_state);
+ ~MetricsService() override;
+
+ // Initializes metrics recording state. Updates various bookkeeping values in
+ // prefs and sets up the scheduler. This is a separate function rather than
+  // being done by the constructor so that field trials can be created before
+  // this is run.
+ void InitializeMetricsRecordingState();
+
+ // Starts the metrics system, turning on recording and uploading of metrics.
+ // Should be called when starting up with metrics enabled, or when metrics
+ // are turned on.
+ void Start();
+
+ // Starts the metrics system in a special test-only mode. Metrics won't ever
+ // be uploaded or persisted in this mode, but metrics will be recorded in
+ // memory.
+ void StartRecordingForTests();
+
+ // Starts updating the "last live" browser timestamp.
+ void StartUpdatingLastLiveTimestamp();
+
+ // Shuts down the metrics system. Should be called at shutdown, or if metrics
+ // are turned off.
+ void Stop();
+
+ // Enable/disable transmission of accumulated logs and crash reports (dumps).
+ // Calling Start() automatically enables reporting, but sending is
+  // asynchronous, so this can be called immediately after Start() to prevent
+  // any uploading.
+ void EnableReporting();
+ void DisableReporting();
+
+ // Returns the client ID for this client, or the empty string if metrics
+ // recording is not currently running.
+ std::string GetClientId();
+
+ // Returns the install date of the application, in seconds since the epoch.
+ int64_t GetInstallDate();
+
+ // Returns the date at which the current metrics client ID was created as
+ // an int64_t containing seconds since the epoch.
+ int64_t GetMetricsReportingEnabledDate();
+
+ // Returns true if the last session exited cleanly.
+ bool WasLastShutdownClean() const;
+
+ // Registers local state prefs used by this class.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ // HistogramFlattener:
+ void RecordDelta(const base::HistogramBase& histogram,
+ const base::HistogramSamples& snapshot) override;
+
+ // This should be called when the application is not idle, i.e. the user seems
+ // to be interacting with the application.
+ void OnApplicationNotIdle();
+
+ // Invoked when we get a WM_SESSIONEND. This places a value in prefs that is
+ // reset when RecordCompletedSessionEnd is invoked.
+ void RecordStartOfSessionEnd();
+
+ // This should be called when the application is shutting down. It records
+ // that session end was successful.
+ void RecordCompletedSessionEnd();
+
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ // Called when the application is going into background mode.
+ void OnAppEnterBackground();
+
+ // Called when the application is coming out of background mode.
+ void OnAppEnterForeground();
+#else
+ // Set the dirty flag, which will require a later call to LogCleanShutdown().
+ void LogNeedForCleanShutdown();
+#endif // defined(OS_ANDROID) || defined(OS_IOS)
+
+ static void SetExecutionPhase(ExecutionPhase execution_phase,
+ PrefService* local_state);
+
+  // Records in the preferences whether the crash report registration was
+  // successful. This count is eventually sent via UMA logs.
+ void RecordBreakpadRegistration(bool success);
+
+  // Records in the preferences whether the browser is running under a
+  // debugger. This count is eventually sent via UMA logs.
+ void RecordBreakpadHasDebugger(bool has_debugger);
+
+ bool recording_active() const;
+ bool reporting_active() const;
+ bool has_unsent_logs() const;
+
+ // Redundant test to ensure that we are notified of a clean exit.
+  // This value should be true when the process has completed shutdown.
+ static bool UmaMetricsProperlyShutdown();
+
+ // Register the specified |provider| to provide additional metrics into the
+ // UMA log. Should be called during MetricsService initialization only.
+ void RegisterMetricsProvider(std::unique_ptr<MetricsProvider> provider);
+
+ // Check if this install was cloned or imaged from another machine. If a
+ // clone is detected, reset the client id and low entropy source. This
+ // should not be called more than once.
+ void CheckForClonedInstall();
+
+ // Clears the stability metrics that are saved in local state.
+ void ClearSavedStabilityMetrics();
+
+ // Pushes a log that has been generated by an external component.
+ void PushExternalLog(const std::string& log);
+
+ // Updates data usage tracking prefs with the specified values.
+ void UpdateMetricsUsagePrefs(const std::string& service_name,
+ int message_size,
+ bool is_cellular);
+
+ variations::SyntheticTrialRegistry* synthetic_trial_registry() {
+ return &synthetic_trial_registry_;
+ }
+
+ protected:
+ // Exposed for testing.
+ MetricsLogManager* log_manager() { return &log_manager_; }
+ MetricsLogStore* log_store() {
+ return reporting_service_.metrics_log_store();
+ }
+
+ // Records the current environment (system profile) in |log|, and persists
+ // the results in prefs.
+ // Exposed for testing.
+ static std::string RecordCurrentEnvironmentHelper(
+ MetricsLog* log,
+ PrefService* local_state,
+ DelegatingProvider* delegating_provider);
+
+ private:
+ // The MetricsService has a lifecycle that is stored as a state.
+ // See metrics_service.cc for description of this lifecycle.
+ enum State {
+ INITIALIZED, // Constructor was called.
+ INIT_TASK_SCHEDULED, // Waiting for deferred init tasks to finish.
+ INIT_TASK_DONE, // Waiting for timer to send initial log.
+    SENDING_LOGS,         // Sending logs and creating new ones when we run out.
+ };
+
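+  // Note: NEED_TO_SHUTDOWN evaluates to the bitwise complement of the
+  // 0xdeadbeef sentinel, i.e. 0x21524110, a similarly distinctive marker value.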
+ enum ShutdownCleanliness {
+ CLEANLY_SHUTDOWN = 0xdeadbeef,
+ NEED_TO_SHUTDOWN = ~CLEANLY_SHUTDOWN
+ };
+
+ // The current state of recording for the MetricsService. The state is UNSET
+ // until set to something else, at which point it remains INACTIVE or ACTIVE
+ // for the lifetime of the object.
+ enum RecordingState {
+ INACTIVE,
+ ACTIVE,
+ UNSET
+ };
+
+ // Calls into the client to initialize some system profile metrics.
+ void StartInitTask();
+
+ // Callback that moves the state to INIT_TASK_DONE. When this is called, the
+ // state should be INIT_TASK_SCHEDULED.
+ void FinishedInitTask();
+
+ void OnUserAction(const std::string& action);
+
+ // Get the amount of uptime since this process started and since the last
+ // call to this function. Also updates the cumulative uptime metric (stored
+ // as a pref) for uninstall. Uptimes are measured using TimeTicks, which
+  // guarantees that they are monotonic and do not jump if the user changes
+  // their clock. The TimeTicks implementation also excludes time while the
+  // computer is suspended.
+ void GetUptimes(PrefService* pref,
+ base::TimeDelta* incremental_uptime,
+ base::TimeDelta* uptime);
+
+ // Turns recording on or off.
+ // DisableRecording() also forces a persistent save of logging state (if
+ // anything has been recorded, or transmitted).
+ void EnableRecording();
+ void DisableRecording();
+
+  // If |in_idle| is true, sets |idle_since_last_transmission_| to true.
+  // If |in_idle| is false and |idle_since_last_transmission_| is true, sets
+  // |idle_since_last_transmission_| to false and starts the timer (provided
+  // starting the timer is permitted).
+ void HandleIdleSinceLastTransmission(bool in_idle);
+
+ // Set up client ID, session ID, etc.
+ void InitializeMetricsState();
+
+ // Opens a new log for recording user experience metrics.
+ void OpenNewLog();
+
+ // Closes out the current log after adding any last information.
+ void CloseCurrentLog();
+
+ // Pushes the text of the current and staged logs into persistent storage.
+ // Called when Chrome shuts down.
+ void PushPendingLogsToPersistentStorage();
+
+  // Ensures that the scheduler is running, assuming the current settings are
+  // such that metrics should be reported. If not, this is a no-op.
+ void StartSchedulerIfNecessary();
+
+ // Starts the process of uploading metrics data.
+ void StartScheduledUpload();
+
+ // Called by the client via a callback when final log info collection is
+ // complete.
+ void OnFinalLogInfoCollectionDone();
+
+ // Prepares the initial stability log, which is only logged when the previous
+ // run of Chrome crashed. This log contains any stability metrics left over
+ // from that previous run, and only these stability metrics. It uses the
+ // system profile from the previous session. |prefs_previous_version| is used
+ // to validate the version number recovered from the system profile. Returns
+ // true if a log was created.
+ bool PrepareInitialStabilityLog(const std::string& prefs_previous_version);
+
+ // Prepares the initial metrics log, which includes startup histograms and
+ // profiler data, as well as incremental stability-related metrics.
+ void PrepareInitialMetricsLog();
+
+ // Reads, increments and then sets the specified long preference that is
+ // stored as a string.
+ void IncrementLongPrefsValue(const char* path);
+
+ // Records that the browser was shut down cleanly.
+ void LogCleanShutdown(bool end_completed);
+
+ // Creates a new MetricsLog instance with the given |log_type|.
+ std::unique_ptr<MetricsLog> CreateLog(MetricsLog::LogType log_type);
+
+ // Records the current environment (system profile) in |log|, and persists
+ // the results in prefs and GlobalPersistentSystemProfile.
+ // Exposed for testing.
+ void RecordCurrentEnvironment(MetricsLog* log);
+
+ // Record complete list of histograms into the current log.
+ // Called when we close a log.
+ void RecordCurrentHistograms();
+
+ // Record complete list of stability histograms into the current log,
+ // i.e., histograms with the |kUmaStabilityHistogramFlag| flag set.
+ void RecordCurrentStabilityHistograms();
+
+ // Record a single independent profile and associated histogram from
+ // metrics providers. If this returns true, one was found and there may
+ // be more.
+ bool PrepareProviderMetricsLog();
+
+ // Records one independent histogram log and then reschedules itself to
+  // check for others. The interval is chosen so as not to adversely impact
+  // the UI.
+ void PrepareProviderMetricsTask();
+
+ // Updates the "last live" browser timestamp and schedules the next update.
+ void UpdateLastLiveTimestampTask();
+
+ // Sub-service for uploading logs.
+ MetricsReportingService reporting_service_;
+
+ // Manager for the various in-flight logs.
+ MetricsLogManager log_manager_;
+
+ // |histogram_snapshot_manager_| prepares histogram deltas for transmission.
+ base::HistogramSnapshotManager histogram_snapshot_manager_;
+
+ // Used to manage various metrics reporting state prefs, such as client id,
+ // low entropy source and whether metrics reporting is enabled. Weak pointer.
+ MetricsStateManager* const state_manager_;
+
+ // Used to interact with the embedder. Weak pointer; must outlive |this|
+ // instance.
+ MetricsServiceClient* const client_;
+
+ // Registered metrics providers.
+ DelegatingProvider delegating_provider_;
+
+ PrefService* local_state_;
+
+ base::ActionCallback action_callback_;
+
+  // Indicate whether recording and reporting are currently happening.
+  // These should not be set directly, but via EnableRecording() /
+  // DisableRecording() and EnableReporting() / DisableReporting().
+ RecordingState recording_state_;
+
+ // Indicate whether test mode is enabled, where the initial log should never
+ // be cut, and logs are neither persisted nor uploaded.
+ bool test_mode_active_;
+
+  // The progression of states made by the browser is recorded in the
+  // following state.
+ State state_;
+
+ // The initial metrics log, used to record startup metrics (histograms and
+ // profiler data). Note that if a crash occurred in the previous session, an
+ // initial stability log may be sent before this.
+ std::unique_ptr<MetricsLog> initial_metrics_log_;
+
+ // Whether the MetricsService object has received any notifications since
+ // the last time a transmission was sent.
+ bool idle_since_last_transmission_;
+
+  // A number that identifies how many times the app has been launched.
+ int session_id_;
+
+ // The scheduler for determining when log rotations should happen.
+ std::unique_ptr<MetricsRotationScheduler> rotation_scheduler_;
+
+ // Stores the time of the first call to |GetUptimes()|.
+ base::TimeTicks first_updated_time_;
+
+ // Stores the time of the last call to |GetUptimes()|.
+ base::TimeTicks last_updated_time_;
+
+ variations::SyntheticTrialRegistry synthetic_trial_registry_;
+
+ // Redundant marker to check that we completed our shutdown, and set the
+ // exited-cleanly bit in the prefs.
+ static ShutdownCleanliness clean_shutdown_status_;
+
+ FRIEND_TEST_ALL_PREFIXES(MetricsServiceTest, IsPluginProcess);
+ FRIEND_TEST_ALL_PREFIXES(MetricsServiceTest,
+ PermutedEntropyCacheClearedWhenLowEntropyReset);
+
+ SEQUENCE_CHECKER(sequence_checker_);
+
+  // Weak pointer factory used to post tasks on different threads. All weak
+ // pointers managed by this factory have the same lifetime as MetricsService.
+ base::WeakPtrFactory<MetricsService> self_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsService);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_SERVICE_H_
diff --git a/components/metrics/metrics_service_accessor.cc b/components/metrics/metrics_service_accessor.cc
new file mode 100644
index 0000000..f00d2e8
--- /dev/null
+++ b/components/metrics/metrics_service_accessor.cc
@@ -0,0 +1,96 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_service_accessor.h"
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_service.h"
+#include "components/prefs/pref_service.h"
+#include "components/variations/hashing.h"
+
+namespace metrics {
+namespace {
+
+bool g_force_official_enabled_test = false;
+
+bool IsMetricsReportingEnabledForOfficialBuild(PrefService* pref_service) {
+  // In official builds, disable metrics reporting when field trials are
+  // forced; otherwise, use the value of the user's preference to determine
+  // whether to enable metrics reporting.
+ return !base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kForceFieldTrials) &&
+ pref_service->GetBoolean(prefs::kMetricsReportingEnabled);
+}
+
+} // namespace
+
+// static
+bool MetricsServiceAccessor::IsMetricsReportingEnabled(
+ PrefService* pref_service) {
+#if defined(GOOGLE_CHROME_BUILD)
+ return IsMetricsReportingEnabledForOfficialBuild(pref_service);
+#else
+  // In non-official builds, disable metrics reporting completely, unless a
+  // test has forced the official-build pref lookup.
+ return g_force_official_enabled_test
+ ? IsMetricsReportingEnabledForOfficialBuild(pref_service)
+ : false;
+#endif // defined(GOOGLE_CHROME_BUILD)
+}
+
+// static
+bool MetricsServiceAccessor::RegisterSyntheticFieldTrial(
+ MetricsService* metrics_service,
+ base::StringPiece trial_name,
+ base::StringPiece group_name) {
+ return RegisterSyntheticFieldTrialWithNameAndGroupHash(
+ metrics_service, variations::HashName(trial_name),
+ variations::HashName(group_name));
+}
+
+// static
+bool MetricsServiceAccessor::RegisterSyntheticMultiGroupFieldTrial(
+ MetricsService* metrics_service,
+ base::StringPiece trial_name,
+ const std::vector<uint32_t>& group_name_hashes) {
+ if (!metrics_service)
+ return false;
+
+ metrics_service->synthetic_trial_registry()
+ ->RegisterSyntheticMultiGroupFieldTrial(variations::HashName(trial_name),
+ group_name_hashes);
+ return true;
+}
+
+// static
+bool MetricsServiceAccessor::RegisterSyntheticFieldTrialWithNameHash(
+ MetricsService* metrics_service,
+ uint32_t trial_name_hash,
+ base::StringPiece group_name) {
+ return RegisterSyntheticFieldTrialWithNameAndGroupHash(
+ metrics_service, trial_name_hash, variations::HashName(group_name));
+}
+
+// static
+bool MetricsServiceAccessor::RegisterSyntheticFieldTrialWithNameAndGroupHash(
+ MetricsService* metrics_service,
+ uint32_t trial_name_hash,
+ uint32_t group_name_hash) {
+ if (!metrics_service)
+ return false;
+
+ variations::SyntheticTrialGroup trial_group(trial_name_hash, group_name_hash);
+ metrics_service->synthetic_trial_registry()->RegisterSyntheticFieldTrial(
+ trial_group);
+ return true;
+}
+
+// static
+void MetricsServiceAccessor::SetForceIsMetricsReportingEnabledPrefLookup(
+ bool value) {
+ g_force_official_enabled_test = value;
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_service_accessor.h b/components/metrics/metrics_service_accessor.h
new file mode 100644
index 0000000..3a36278
--- /dev/null
+++ b/components/metrics/metrics_service_accessor.h
@@ -0,0 +1,81 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_SERVICE_ACCESSOR_H_
+#define COMPONENTS_METRICS_METRICS_SERVICE_ACCESSOR_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+class PrefService;
+
+namespace metrics {
+
+class MetricsService;
+
+// This class limits and documents access to metrics service helper methods.
+// These methods are protected, so each user has to inherit its own
+// program-specific specialization and enable access there by declaring friends.
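+//
+// A hypothetical embedder-side specialization (illustrative only; the names
+// below are not part of this change) might look like:
+//
+//   class MyEmbedderMetricsServiceAccessor : public MetricsServiceAccessor {
+//    private:
+//     // Grant the settings UI access to the protected helpers above.
+//     friend class MyEmbedderSettingsUi;
+//   };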
+class MetricsServiceAccessor {
+ protected:
+ // Constructor declared as protected to enable inheritance. Descendants should
+ // disallow instantiation.
+ MetricsServiceAccessor() {}
+
+ // Returns whether metrics reporting is enabled, using the value of the
+  // kMetricsReportingEnabled pref in |pref_service| to determine whether the
+  // user has enabled reporting.
+ static bool IsMetricsReportingEnabled(PrefService* pref_service);
+
+ // Registers a field trial name and group with |metrics_service| (if not
+ // null), to be used to annotate a UMA report with a particular configuration
+ // state. Returns true on success.
+ // See the comment on MetricsService::RegisterSyntheticFieldTrial() for
+ // details.
+ static bool RegisterSyntheticFieldTrial(MetricsService* metrics_service,
+ base::StringPiece trial_name,
+ base::StringPiece group_name);
+
+ // Registers a field trial name and set of groups with |metrics_service| (if
+ // not null), to be used to annotate a UMA report with a particular
+ // configuration state. Returns true on success.
+ // See the comment on MetricsService::RegisterSyntheticMultiGroupFieldTrial()
+ // for details.
+ static bool RegisterSyntheticMultiGroupFieldTrial(
+ MetricsService* metrics_service,
+ base::StringPiece trial_name,
+ const std::vector<uint32_t>& group_name_hashes);
+
+ // Same as RegisterSyntheticFieldTrial above, but takes in the trial name as a
+ // hash rather than computing the hash from the string.
+ static bool RegisterSyntheticFieldTrialWithNameHash(
+ MetricsService* metrics_service,
+ uint32_t trial_name_hash,
+ base::StringPiece group_name);
+
+ // Same as RegisterSyntheticFieldTrial above, but takes in the trial and group
+ // names as hashes rather than computing those hashes from the strings.
+ static bool RegisterSyntheticFieldTrialWithNameAndGroupHash(
+ MetricsService* metrics_service,
+ uint32_t trial_name_hash,
+ uint32_t group_name_hash);
+
+ // IsMetricsReportingEnabled() in non-official builds unconditionally returns
+ // false. This results in different behavior for tests running in official vs
+  // non-official builds. To get consistent behavior, call this with true, which
+  // forces non-official builds to look at the same pref value that official
+  // builds look at.
+ static void SetForceIsMetricsReportingEnabledPrefLookup(bool value);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MetricsServiceAccessor);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_SERVICE_ACCESSOR_H_
diff --git a/components/metrics/metrics_service_client.cc b/components/metrics/metrics_service_client.cc
new file mode 100644
index 0000000..a80dc8a
--- /dev/null
+++ b/components/metrics/metrics_service_client.cc
@@ -0,0 +1,65 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_service_client.h"
+
+#include "components/metrics/url_constants.h"
+
+namespace metrics {
+
+MetricsServiceClient::MetricsServiceClient() {}
+
+MetricsServiceClient::~MetricsServiceClient() {}
+
+ukm::UkmService* MetricsServiceClient::GetUkmService() {
+ return nullptr;
+}
+
+bool MetricsServiceClient::IsReportingPolicyManaged() {
+ return false;
+}
+
+EnableMetricsDefault MetricsServiceClient::GetMetricsReportingDefaultState() {
+ return EnableMetricsDefault::DEFAULT_UNKNOWN;
+}
+
+bool MetricsServiceClient::IsUMACellularUploadLogicEnabled() {
+ return false;
+}
+
+std::string MetricsServiceClient::GetMetricsServerUrl() {
+ return kNewMetricsServerUrl;
+}
+
+std::string MetricsServiceClient::GetInsecureMetricsServerUrl() {
+ return kNewMetricsServerUrlInsecure;
+}
+
+bool MetricsServiceClient::SyncStateAllowsUkm() {
+ return false;
+}
+
+bool MetricsServiceClient::SyncStateAllowsExtensionUkm() {
+ return false;
+}
+
+bool MetricsServiceClient::AreNotificationListenersEnabledOnAllProfiles() {
+ return false;
+}
+
+void MetricsServiceClient::SetUpdateRunningServicesCallback(
+ const base::Closure& callback) {
+ update_running_services_ = callback;
+}
+
+void MetricsServiceClient::UpdateRunningServices() {
+ if (update_running_services_)
+ update_running_services_.Run();
+}
+
+std::string MetricsServiceClient::GetAppPackageName() {
+ return std::string();
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_service_client.h b/components/metrics/metrics_service_client.h
new file mode 100644
index 0000000..c1bc828
--- /dev/null
+++ b/components/metrics/metrics_service_client.h
@@ -0,0 +1,151 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_SERVICE_CLIENT_H_
+#define COMPONENTS_METRICS_METRICS_SERVICE_CLIENT_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/callback.h"
+#include "base/strings/string16.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_log_uploader.h"
+#include "components/metrics/metrics_reporting_default_state.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace base {
+class FilePath;
+}
+
+namespace ukm {
+class UkmService;
+}
+
+namespace metrics {
+
+class MetricsLogUploader;
+class MetricsService;
+
+// An abstraction of operations that depend on the embedder's (e.g. Chrome)
+// environment.
+class MetricsServiceClient {
+ public:
+ MetricsServiceClient();
+ virtual ~MetricsServiceClient();
+
+ // Returns the MetricsService instance that this client is associated with.
+ // With the exception of testing contexts, the returned instance must be valid
+ // for the lifetime of this object (typically, the embedder's client
+ // implementation will own the MetricsService instance being returned).
+ virtual MetricsService* GetMetricsService() = 0;
+
+ // Returns the UkmService instance that this client is associated with.
+ virtual ukm::UkmService* GetUkmService();
+
+ // Registers the client id with other services (e.g. crash reporting), called
+ // when metrics recording gets enabled.
+ virtual void SetMetricsClientId(const std::string& client_id) = 0;
+
+ // Returns the product value to use in uploaded reports, which will be used to
+ // set the ChromeUserMetricsExtension.product field. See comments on that
+ // field on why it's an int32_t rather than an enum.
+ virtual int32_t GetProduct() = 0;
+
+ // Returns the current application locale (e.g. "en-US").
+ virtual std::string GetApplicationLocale() = 0;
+
+ // Retrieves the brand code string associated with the install, returning
+ // false if no brand code is available.
+ virtual bool GetBrand(std::string* brand_code) = 0;
+
+ // Returns the release channel (e.g. stable, beta, etc) of the application.
+ virtual SystemProfileProto::Channel GetChannel() = 0;
+
+ // Returns the version of the application as a string.
+ virtual std::string GetVersionString() = 0;
+
+ // Called by the metrics service when a new environment has been recorded.
+ // Takes the serialized environment as a parameter. The contents of
+ // |serialized_environment| are consumed by the call, but the caller maintains
+ // ownership.
+ virtual void OnEnvironmentUpdate(std::string* serialized_environment) {}
+
+ // Called by the metrics service to record a clean shutdown.
+ virtual void OnLogCleanShutdown() {}
+
+ // Called prior to a metrics log being closed, allowing the client to collect
+ // extra histograms that will go in that log. Asynchronous API - the client
+ // implementation should call |done_callback| when complete.
+ virtual void CollectFinalMetricsForLog(
+ const base::Closure& done_callback) = 0;
+
+ // Get the URL of the metrics server.
+ virtual std::string GetMetricsServerUrl();
+
+ // Get the fallback HTTP URL of the metrics server.
+ virtual std::string GetInsecureMetricsServerUrl();
+
+ // Creates a MetricsLogUploader with the specified parameters (see comments on
+ // MetricsLogUploader for details).
+ virtual std::unique_ptr<MetricsLogUploader> CreateUploader(
+ base::StringPiece server_url,
+ base::StringPiece insecure_server_url,
+ base::StringPiece mime_type,
+ metrics::MetricsLogUploader::MetricServiceType service_type,
+ const MetricsLogUploader::UploadCallback& on_upload_complete) = 0;
+
+ // Returns the standard interval between upload attempts.
+ virtual base::TimeDelta GetStandardUploadInterval() = 0;
+
+ // Called on plugin loading errors.
+ virtual void OnPluginLoadingError(const base::FilePath& plugin_path) {}
+
+ // Called on renderer crashes in some embedders (e.g., those that do not use
+ // //content and thus do not have //content's notification system available
+ // as a mechanism for observing renderer crashes).
+ virtual void OnRendererProcessCrash() {}
+
+ // Returns whether metrics reporting is managed by policy.
+ virtual bool IsReportingPolicyManaged();
+
+ // Gets information about the default value for the metrics reporting checkbox
+ // shown during first-run.
+ virtual EnableMetricsDefault GetMetricsReportingDefaultState();
+
+ // Returns whether cellular logic is enabled for metrics reporting.
+ virtual bool IsUMACellularUploadLogicEnabled();
+
+ // Returns true iff sync is in a state that allows UKM to be enabled.
+ // See //components/ukm/observers/sync_disable_observer.h for details.
+ virtual bool SyncStateAllowsUkm();
+
+ // Returns true iff sync is in a state that allows UKM to capture extensions.
+ // See //components/ukm/observers/sync_disable_observer.h for details.
+ virtual bool SyncStateAllowsExtensionUkm();
+
+ // Returns whether UKM notification listeners were attached to all profiles.
+ virtual bool AreNotificationListenersEnabledOnAllProfiles();
+
+ // Gets the Chrome package name for Android. Returns empty string for other
+ // platforms.
+ virtual std::string GetAppPackageName();
+
+ // Sets the callback to run MetricsServiceManager::UpdateRunningServices.
+ void SetUpdateRunningServicesCallback(const base::Closure& callback);
+
+  // Notifies MetricsServiceManager to UpdateRunningServices using the callback.
+ void UpdateRunningServices();
+
+ private:
+ base::Closure update_running_services_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsServiceClient);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_SERVICE_CLIENT_H_
diff --git a/components/metrics/metrics_service_unittest.cc b/components/metrics/metrics_service_unittest.cc
new file mode 100644
index 0000000..4350238
--- /dev/null
+++ b/components/metrics/metrics_service_unittest.cc
@@ -0,0 +1,443 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_service.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/metrics/user_metrics.h"
+#include "base/stl_util.h"
+#include "base/strings/string16.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "components/metrics/client_info.h"
+#include "components/metrics/environment_recorder.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_state_manager.h"
+#include "components/metrics/metrics_upload_scheduler.h"
+#include "components/metrics/test_enabled_state_provider.h"
+#include "components/metrics/test_metrics_provider.h"
+#include "components/metrics/test_metrics_service_client.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/zlib/google/compression_utils.h"
+
+namespace metrics {
+
+namespace {
+
+void YieldUntil(base::Time when) {
+ while (base::Time::Now() <= when)
+ base::PlatformThread::YieldCurrentThread();
+}
+
+void StoreNoClientInfoBackup(const ClientInfo& /* client_info */) {
+}
+
+std::unique_ptr<ClientInfo> ReturnNoBackup() {
+ return std::unique_ptr<ClientInfo>();
+}
+
+class TestMetricsService : public MetricsService {
+ public:
+ TestMetricsService(MetricsStateManager* state_manager,
+ MetricsServiceClient* client,
+ PrefService* local_state)
+ : MetricsService(state_manager, client, local_state) {}
+ ~TestMetricsService() override {}
+
+ using MetricsService::log_manager;
+ using MetricsService::log_store;
+ using MetricsService::RecordCurrentEnvironmentHelper;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestMetricsService);
+};
+
+class TestMetricsLog : public MetricsLog {
+ public:
+ TestMetricsLog(const std::string& client_id,
+ int session_id,
+ MetricsServiceClient* client)
+ : MetricsLog(client_id, session_id, MetricsLog::ONGOING_LOG, client) {}
+
+ ~TestMetricsLog() override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestMetricsLog);
+};
+
+class MetricsServiceTest : public testing::Test {
+ public:
+ MetricsServiceTest()
+ : task_runner_(new base::TestSimpleTaskRunner),
+ task_runner_handle_(task_runner_),
+ enabled_state_provider_(new TestEnabledStateProvider(false, false)) {
+ base::SetRecordActionTaskRunner(task_runner_);
+ MetricsService::RegisterPrefs(testing_local_state_.registry());
+ }
+
+ ~MetricsServiceTest() override {
+ MetricsService::SetExecutionPhase(ExecutionPhase::UNINITIALIZED_PHASE,
+ GetLocalState());
+ }
+
+ MetricsStateManager* GetMetricsStateManager() {
+ // Lazy-initialize the metrics_state_manager so that it correctly reads the
+ // stability state from prefs after tests have a chance to initialize it.
+ if (!metrics_state_manager_) {
+ metrics_state_manager_ = MetricsStateManager::Create(
+ GetLocalState(), enabled_state_provider_.get(), base::string16(),
+ base::Bind(&StoreNoClientInfoBackup), base::Bind(&ReturnNoBackup));
+ }
+ return metrics_state_manager_.get();
+ }
+
+ PrefService* GetLocalState() { return &testing_local_state_; }
+
+ // Sets metrics reporting as enabled for testing.
+ void EnableMetricsReporting() {
+ enabled_state_provider_->set_consent(true);
+ enabled_state_provider_->set_enabled(true);
+ }
+
+ // Finds a histogram with the specified |name_hash| in |histograms|.
+ const base::HistogramBase* FindHistogram(
+ const base::StatisticsRecorder::Histograms& histograms,
+ uint64_t name_hash) {
+ for (const base::HistogramBase* histogram : histograms) {
+ if (name_hash == base::HashMetricName(histogram->histogram_name()))
+ return histogram;
+ }
+ return nullptr;
+ }
+
+ // Checks whether |uma_log| contains any histograms that are not flagged
+ // with kUmaStabilityHistogramFlag. Stability logs should only contain such
+ // histograms.
+ void CheckForNonStabilityHistograms(
+ const ChromeUserMetricsExtension& uma_log) {
+ const int kStabilityFlags = base::HistogramBase::kUmaStabilityHistogramFlag;
+ const base::StatisticsRecorder::Histograms histograms =
+ base::StatisticsRecorder::GetHistograms();
+ for (int i = 0; i < uma_log.histogram_event_size(); ++i) {
+ const uint64_t hash = uma_log.histogram_event(i).name_hash();
+
+ const base::HistogramBase* histogram = FindHistogram(histograms, hash);
+ EXPECT_TRUE(histogram) << hash;
+
+ EXPECT_EQ(kStabilityFlags, histogram->flags() & kStabilityFlags) << hash;
+ }
+ }
+
+ protected:
+ scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
+ base::ThreadTaskRunnerHandle task_runner_handle_;
+ base::test::ScopedFeatureList feature_list_;
+
+ private:
+ std::unique_ptr<TestEnabledStateProvider> enabled_state_provider_;
+ TestingPrefServiceSimple testing_local_state_;
+ std::unique_ptr<MetricsStateManager> metrics_state_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsServiceTest);
+};
+
+} // namespace
+
+TEST_F(MetricsServiceTest, InitialStabilityLogAfterCleanShutDown) {
+ EnableMetricsReporting();
+ GetLocalState()->SetBoolean(prefs::kStabilityExitedCleanly, true);
+
+ TestMetricsServiceClient client;
+ TestMetricsService service(
+ GetMetricsStateManager(), &client, GetLocalState());
+
+ TestMetricsProvider* test_provider = new TestMetricsProvider();
+ service.RegisterMetricsProvider(
+ std::unique_ptr<MetricsProvider>(test_provider));
+
+ service.InitializeMetricsRecordingState();
+
+ // No initial stability log should be generated.
+ EXPECT_FALSE(service.has_unsent_logs());
+
+ // Ensure that HasPreviousSessionData() is always called on providers,
+ // for consistency, even if other conditions already indicate their presence.
+ EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());
+
+ // The test provider should not have been called upon to provide initial
+ // stability nor regular stability metrics.
+ EXPECT_FALSE(test_provider->provide_initial_stability_metrics_called());
+ EXPECT_FALSE(test_provider->provide_stability_metrics_called());
+}
+
+TEST_F(MetricsServiceTest, InitialStabilityLogAtProviderRequest) {
+ EnableMetricsReporting();
+
+ // Save an existing system profile to prefs, to correspond to what would be
+ // saved from a previous session.
+ TestMetricsServiceClient client;
+ TestMetricsLog log("client", 1, &client);
+ DelegatingProvider delegating_provider;
+ TestMetricsService::RecordCurrentEnvironmentHelper(&log, GetLocalState(),
+ &delegating_provider);
+
+ // Record stability build time and version from previous session, so that
+ // stability metrics (including exited cleanly flag) won't be cleared.
+ EnvironmentRecorder(GetLocalState())
+ .SetBuildtimeAndVersion(MetricsLog::GetBuildTime(),
+ client.GetVersionString());
+
+  // Set the clean exit flag; otherwise a stability log would be produced
+  // irrespective of provider requests.
+ GetLocalState()->SetBoolean(prefs::kStabilityExitedCleanly, true);
+
+ TestMetricsService service(
+ GetMetricsStateManager(), &client, GetLocalState());
+ // Add a metrics provider that requests a stability log.
+ TestMetricsProvider* test_provider = new TestMetricsProvider();
+ test_provider->set_has_initial_stability_metrics(true);
+ service.RegisterMetricsProvider(
+ std::unique_ptr<MetricsProvider>(test_provider));
+
+ service.InitializeMetricsRecordingState();
+
+ // The initial stability log should be generated and persisted in unsent logs.
+ MetricsLogStore* log_store = service.log_store();
+ EXPECT_TRUE(log_store->has_unsent_logs());
+ EXPECT_FALSE(log_store->has_staged_log());
+
+ // Ensure that HasPreviousSessionData() is always called on providers,
+ // for consistency, even if other conditions already indicate their presence.
+ EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());
+
+ // The test provider should have been called upon to provide initial
+ // stability and regular stability metrics.
+ EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
+ EXPECT_TRUE(test_provider->provide_stability_metrics_called());
+
+ // Stage the log and retrieve it.
+ log_store->StageNextLog();
+ EXPECT_TRUE(log_store->has_staged_log());
+
+ std::string uncompressed_log;
+ EXPECT_TRUE(
+ compression::GzipUncompress(log_store->staged_log(), &uncompressed_log));
+
+ ChromeUserMetricsExtension uma_log;
+ EXPECT_TRUE(uma_log.ParseFromString(uncompressed_log));
+
+ EXPECT_TRUE(uma_log.has_client_id());
+ EXPECT_TRUE(uma_log.has_session_id());
+ EXPECT_TRUE(uma_log.has_system_profile());
+ EXPECT_EQ(0, uma_log.user_action_event_size());
+ EXPECT_EQ(0, uma_log.omnibox_event_size());
+ EXPECT_EQ(0, uma_log.perf_data_size());
+ CheckForNonStabilityHistograms(uma_log);
+
+ // As there wasn't an unclean shutdown, this log has zero crash count.
+ EXPECT_EQ(0, uma_log.system_profile().stability().crash_count());
+}
+
+TEST_F(MetricsServiceTest, InitialStabilityLogAfterCrash) {
+ EnableMetricsReporting();
+ GetLocalState()->ClearPref(prefs::kStabilityExitedCleanly);
+
+ // Set up prefs to simulate restarting after a crash.
+
+ // Save an existing system profile to prefs, to correspond to what would be
+ // saved from a previous session.
+ TestMetricsServiceClient client;
+ TestMetricsLog log("client", 1, &client);
+ DelegatingProvider delegating_provider;
+ TestMetricsService::RecordCurrentEnvironmentHelper(&log, GetLocalState(),
+ &delegating_provider);
+
+ // Record stability build time and version from previous session, so that
+ // stability metrics (including exited cleanly flag) won't be cleared.
+ EnvironmentRecorder(GetLocalState())
+ .SetBuildtimeAndVersion(MetricsLog::GetBuildTime(),
+ client.GetVersionString());
+
+ GetLocalState()->SetBoolean(prefs::kStabilityExitedCleanly, false);
+
+ TestMetricsService service(
+ GetMetricsStateManager(), &client, GetLocalState());
+ // Add a provider.
+ TestMetricsProvider* test_provider = new TestMetricsProvider();
+ service.RegisterMetricsProvider(
+ std::unique_ptr<MetricsProvider>(test_provider));
+ service.InitializeMetricsRecordingState();
+
+ // The initial stability log should be generated and persisted in unsent logs.
+ MetricsLogStore* log_store = service.log_store();
+ EXPECT_TRUE(log_store->has_unsent_logs());
+ EXPECT_FALSE(log_store->has_staged_log());
+
+ // Ensure that HasPreviousSessionData() is always called on providers,
+ // for consistency, even if other conditions already indicate their presence.
+ EXPECT_TRUE(test_provider->has_initial_stability_metrics_called());
+
+ // The test provider should have been called upon to provide initial
+ // stability and regular stability metrics.
+ EXPECT_TRUE(test_provider->provide_initial_stability_metrics_called());
+ EXPECT_TRUE(test_provider->provide_stability_metrics_called());
+
+ // Stage the log and retrieve it.
+ log_store->StageNextLog();
+ EXPECT_TRUE(log_store->has_staged_log());
+
+ std::string uncompressed_log;
+ EXPECT_TRUE(
+ compression::GzipUncompress(log_store->staged_log(), &uncompressed_log));
+
+ ChromeUserMetricsExtension uma_log;
+ EXPECT_TRUE(uma_log.ParseFromString(uncompressed_log));
+
+ EXPECT_TRUE(uma_log.has_client_id());
+ EXPECT_TRUE(uma_log.has_session_id());
+ EXPECT_TRUE(uma_log.has_system_profile());
+ EXPECT_EQ(0, uma_log.user_action_event_size());
+ EXPECT_EQ(0, uma_log.omnibox_event_size());
+ EXPECT_EQ(0, uma_log.perf_data_size());
+ CheckForNonStabilityHistograms(uma_log);
+
+ EXPECT_EQ(1, uma_log.system_profile().stability().crash_count());
+}
+
+TEST_F(MetricsServiceTest,
+ MetricsProviderOnRecordingDisabledCalledOnInitialStop) {
+ TestMetricsServiceClient client;
+ TestMetricsService service(
+ GetMetricsStateManager(), &client, GetLocalState());
+
+ TestMetricsProvider* test_provider = new TestMetricsProvider();
+ service.RegisterMetricsProvider(
+ std::unique_ptr<MetricsProvider>(test_provider));
+
+ service.InitializeMetricsRecordingState();
+ service.Stop();
+
+ EXPECT_TRUE(test_provider->on_recording_disabled_called());
+}
+
+TEST_F(MetricsServiceTest, MetricsProvidersInitialized) {
+ TestMetricsServiceClient client;
+ TestMetricsService service(
+ GetMetricsStateManager(), &client, GetLocalState());
+
+ TestMetricsProvider* test_provider = new TestMetricsProvider();
+ service.RegisterMetricsProvider(
+ std::unique_ptr<MetricsProvider>(test_provider));
+
+ service.InitializeMetricsRecordingState();
+
+ EXPECT_TRUE(test_provider->init_called());
+}
+
+TEST_F(MetricsServiceTest, SplitRotation) {
+ TestMetricsServiceClient client;
+ TestMetricsService service(GetMetricsStateManager(), &client,
+ GetLocalState());
+ service.InitializeMetricsRecordingState();
+ service.Start();
+ // Rotation loop should create a log and mark state as idle.
+ // Upload loop should start upload or be restarted.
+ // The independent-metrics upload job will be started and always be a task.
+ task_runner_->RunPendingTasks();
+  // Rotation loop should terminate due to being idle.
+ // Upload loop should start uploading if it isn't already.
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client.uploader()->is_uploading());
+ EXPECT_EQ(1U, task_runner_->NumPendingTasks());
+ service.OnApplicationNotIdle();
+ EXPECT_TRUE(client.uploader()->is_uploading());
+ EXPECT_EQ(2U, task_runner_->NumPendingTasks());
+ // Log generation should be suppressed due to unsent log.
+ // Idle state should not be reset.
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client.uploader()->is_uploading());
+ EXPECT_EQ(2U, task_runner_->NumPendingTasks());
+ // Make sure idle state was not reset.
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client.uploader()->is_uploading());
+ EXPECT_EQ(2U, task_runner_->NumPendingTasks());
+ // Upload should not be rescheduled, since there are no other logs.
+ client.uploader()->CompleteUpload(200);
+ EXPECT_FALSE(client.uploader()->is_uploading());
+ EXPECT_EQ(2U, task_runner_->NumPendingTasks());
+ // Running should generate a log, restart upload loop, and mark idle.
+ task_runner_->RunPendingTasks();
+ EXPECT_FALSE(client.uploader()->is_uploading());
+ EXPECT_EQ(3U, task_runner_->NumPendingTasks());
+ // Upload should start, and rotation loop should idle out.
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client.uploader()->is_uploading());
+ EXPECT_EQ(1U, task_runner_->NumPendingTasks());
+ // Uploader should reschedule when there is another log available.
+ service.PushExternalLog("Blah");
+ client.uploader()->CompleteUpload(200);
+ EXPECT_FALSE(client.uploader()->is_uploading());
+ EXPECT_EQ(2U, task_runner_->NumPendingTasks());
+ // Upload should start.
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client.uploader()->is_uploading());
+ EXPECT_EQ(1U, task_runner_->NumPendingTasks());
+}
+
+TEST_F(MetricsServiceTest, LastLiveTimestamp) {
+ TestMetricsServiceClient client;
+ TestMetricsService service(GetMetricsStateManager(), &client,
+ GetLocalState());
+
+ base::Time initial_last_live_time =
+ GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp);
+
+ service.InitializeMetricsRecordingState();
+ service.Start();
+
+ task_runner_->RunPendingTasks();
+ size_t num_pending_tasks = task_runner_->NumPendingTasks();
+
+ service.StartUpdatingLastLiveTimestamp();
+
+ // Starting the update sequence should not write anything, but should
+ // set up for a later write.
+ EXPECT_EQ(
+ initial_last_live_time,
+ GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp));
+ EXPECT_EQ(num_pending_tasks + 1, task_runner_->NumPendingTasks());
+
+ // To avoid flakiness, yield until we're over a microsecond threshold.
+ YieldUntil(initial_last_live_time + base::TimeDelta::FromMicroseconds(2));
+
+ task_runner_->RunPendingTasks();
+
+ // Verify that the time has updated in local state.
+ base::Time updated_last_live_time =
+ GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp);
+ EXPECT_LT(initial_last_live_time, updated_last_live_time);
+
+ // Double check that an update schedules again...
+ YieldUntil(updated_last_live_time + base::TimeDelta::FromMicroseconds(2));
+
+ task_runner_->RunPendingTasks();
+ EXPECT_LT(
+ updated_last_live_time,
+ GetLocalState()->GetTime(prefs::kStabilityBrowserLastLiveTimeStamp));
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_state_manager.cc b/components/metrics/metrics_state_manager.cc
new file mode 100644
index 0000000..f4515c9
--- /dev/null
+++ b/components/metrics/metrics_state_manager.cc
@@ -0,0 +1,397 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_state_manager.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "base/command_line.h"
+#include "base/guid.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "components/metrics/cloned_install_detector.h"
+#include "components/metrics/enabled_state_provider.h"
+#include "components/metrics/machine_id_provider.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_provider.h"
+#include "components/metrics/metrics_switches.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+#include "components/variations/caching_permuted_entropy_provider.h"
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace metrics {
+
+namespace {
+
+// The argument used to generate a non-identifying entropy source. We want no
+// more than 13 bits of entropy, so use this max to return a number in the range
+// [0, 7999] as the entropy source (12.97 bits of entropy).
+const int kMaxLowEntropySize = 8000;
+
+// Default prefs value for prefs::kMetricsLowEntropySource to indicate that
+// the value has not yet been set.
+const int kLowEntropySourceNotSet = -1;
+
+// Generates a new non-identifying entropy source used to seed persistent
+// activities.
+int GenerateLowEntropySource() {
+ return base::RandInt(0, kMaxLowEntropySize - 1);
+}
+
+// Records the given |low_entropy_source_value| in a histogram.
+void LogLowEntropyValue(int low_entropy_source_value) {
+ base::UmaHistogramSparse("UMA.LowEntropySourceValue",
+ low_entropy_source_value);
+}
+
+int64_t ReadEnabledDate(PrefService* local_state) {
+ return local_state->GetInt64(prefs::kMetricsReportingEnabledTimestamp);
+}
+
+int64_t ReadInstallDate(PrefService* local_state) {
+ return local_state->GetInt64(prefs::kInstallDate);
+}
+
+// Rounds a timestamp measured in seconds since epoch to one with a granularity
+// of an hour. This can be used before uploading potentially sensitive
+// timestamps.
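+// For example, a timestamp taken at 17:45:30 UTC is reported as 17:00:00 UTC.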
+int64_t RoundSecondsToHour(int64_t time_in_seconds) {
+ return 3600 * (time_in_seconds / 3600);
+}
+
+// Records the cloned install histogram.
+void LogClonedInstall() {
+ // Equivalent to UMA_HISTOGRAM_BOOLEAN with the stability flag set.
+ UMA_STABILITY_HISTOGRAM_ENUMERATION("UMA.IsClonedInstall", 1, 2);
+}
+
+class MetricsStateMetricsProvider : public MetricsProvider {
+ public:
+ MetricsStateMetricsProvider(PrefService* local_state,
+ bool metrics_ids_were_reset,
+ std::string previous_client_id)
+ : local_state_(local_state),
+ metrics_ids_were_reset_(metrics_ids_were_reset),
+ previous_client_id_(std::move(previous_client_id)) {}
+
+ // MetricsProvider:
+ void ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile) override {
+ system_profile->set_uma_enabled_date(
+ RoundSecondsToHour(ReadEnabledDate(local_state_)));
+ system_profile->set_install_date(
+ RoundSecondsToHour(ReadInstallDate(local_state_)));
+ }
+
+ void ProvidePreviousSessionData(
+ ChromeUserMetricsExtension* uma_proto) override {
+ if (metrics_ids_were_reset_) {
+ LogClonedInstall();
+ if (!previous_client_id_.empty()) {
+ // If we know the previous client id, overwrite the client id for the
+ // previous session log so the log contains the client id at the time
+ // of the previous session. This allows better attribution of crashes
+ // to earlier behavior. If the previous client id is unknown, leave
+ // the current client id.
+ uma_proto->set_client_id(MetricsLog::Hash(previous_client_id_));
+ }
+ }
+ }
+
+ void ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) override {
+ if (local_state_->GetBoolean(prefs::kMetricsResetIds))
+ LogClonedInstall();
+ }
+
+ private:
+ PrefService* const local_state_;
+ const bool metrics_ids_were_reset_;
+  // |previous_client_id_| is set (if known) only when |metrics_ids_were_reset_|
+  // is true.
+ const std::string previous_client_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsStateMetricsProvider);
+};
+
+} // namespace
+
+// static
+bool MetricsStateManager::instance_exists_ = false;
+
+MetricsStateManager::MetricsStateManager(
+ PrefService* local_state,
+ EnabledStateProvider* enabled_state_provider,
+ const base::string16& backup_registry_key,
+ const StoreClientInfoCallback& store_client_info,
+ const LoadClientInfoCallback& retrieve_client_info)
+ : local_state_(local_state),
+ enabled_state_provider_(enabled_state_provider),
+ store_client_info_(store_client_info),
+ load_client_info_(retrieve_client_info),
+ clean_exit_beacon_(backup_registry_key, local_state),
+ low_entropy_source_(kLowEntropySourceNotSet),
+ entropy_source_returned_(ENTROPY_SOURCE_NONE),
+ metrics_ids_were_reset_(false) {
+ ResetMetricsIDsIfNecessary();
+ if (enabled_state_provider_->IsConsentGiven())
+ ForceClientIdCreation();
+
+ // Set the install date if this is our first run.
+ int64_t install_date = local_state_->GetInt64(prefs::kInstallDate);
+ if (install_date == 0)
+ local_state_->SetInt64(prefs::kInstallDate, base::Time::Now().ToTimeT());
+
+ DCHECK(!instance_exists_);
+ instance_exists_ = true;
+}
+
+MetricsStateManager::~MetricsStateManager() {
+ DCHECK(instance_exists_);
+ instance_exists_ = false;
+}
+
+std::unique_ptr<MetricsProvider> MetricsStateManager::GetProvider() {
+ return std::make_unique<MetricsStateMetricsProvider>(
+ local_state_, metrics_ids_were_reset_, previous_client_id_);
+}
+
+bool MetricsStateManager::IsMetricsReportingEnabled() {
+ return enabled_state_provider_->IsReportingEnabled();
+}
+
+int64_t MetricsStateManager::GetInstallDate() const {
+ return ReadInstallDate(local_state_);
+}
+
+void MetricsStateManager::ForceClientIdCreation() {
+ {
+ std::string client_id_from_prefs =
+ local_state_->GetString(prefs::kMetricsClientID);
+ // If client id in prefs matches the cached copy, return early.
+ if (!client_id_from_prefs.empty() && client_id_from_prefs == client_id_)
+ return;
+ client_id_.swap(client_id_from_prefs);
+ }
+
+ if (!client_id_.empty())
+ return;
+
+ const std::unique_ptr<ClientInfo> client_info_backup = LoadClientInfo();
+ if (client_info_backup) {
+ client_id_ = client_info_backup->client_id;
+
+ const base::Time now = base::Time::Now();
+
+ // Save the recovered client id and also try to reinstantiate the backup
+ // values for the dates corresponding with that client id in order to avoid
+ // weird scenarios where we could report an old client id with a recent
+ // install date.
+ local_state_->SetString(prefs::kMetricsClientID, client_id_);
+ local_state_->SetInt64(prefs::kInstallDate,
+ client_info_backup->installation_date != 0
+ ? client_info_backup->installation_date
+ : now.ToTimeT());
+ local_state_->SetInt64(prefs::kMetricsReportingEnabledTimestamp,
+ client_info_backup->reporting_enabled_date != 0
+ ? client_info_backup->reporting_enabled_date
+ : now.ToTimeT());
+
+ base::TimeDelta recovered_installation_age;
+ if (client_info_backup->installation_date != 0) {
+ recovered_installation_age =
+ now - base::Time::FromTimeT(client_info_backup->installation_date);
+ }
+ UMA_HISTOGRAM_COUNTS_10000("UMA.ClientIdBackupRecoveredWithAge",
+ recovered_installation_age.InHours());
+
+ // Flush the backup back to persistent storage in case we re-generated
+ // missing data above.
+ BackUpCurrentClientInfo();
+ return;
+ }
+
+  // Having failed to recover an existing client ID, generate a new one.
+ client_id_ = base::GenerateGUID();
+ local_state_->SetString(prefs::kMetricsClientID, client_id_);
+
+ // Record the timestamp of when the user opted in to UMA.
+ local_state_->SetInt64(prefs::kMetricsReportingEnabledTimestamp,
+ base::Time::Now().ToTimeT());
+
+ BackUpCurrentClientInfo();
+}
+
+void MetricsStateManager::CheckForClonedInstall() {
+ DCHECK(!cloned_install_detector_);
+
+ if (!MachineIdProvider::HasId())
+ return;
+
+ cloned_install_detector_ = std::make_unique<ClonedInstallDetector>();
+ cloned_install_detector_->CheckForClonedInstall(local_state_);
+}
+
+std::unique_ptr<const base::FieldTrial::EntropyProvider>
+MetricsStateManager::CreateDefaultEntropyProvider() {
+ if (enabled_state_provider_->IsConsentGiven()) {
+ // For metrics reporting-enabled users, we combine the client ID and low
+ // entropy source to get the final entropy source. Otherwise, only use the
+ // low entropy source.
+ // This has two useful properties:
+ // 1) It makes the entropy source less identifiable for parties that do not
+ // know the low entropy source.
+ // 2) It makes the final entropy source resettable.
+ const int low_entropy_source_value = GetLowEntropySource();
+
+ UpdateEntropySourceReturnedValue(ENTROPY_SOURCE_HIGH);
+ const std::string high_entropy_source =
+ client_id_ + base::IntToString(low_entropy_source_value);
+ return std::unique_ptr<const base::FieldTrial::EntropyProvider>(
+ new variations::SHA1EntropyProvider(high_entropy_source));
+ }
+
+ UpdateEntropySourceReturnedValue(ENTROPY_SOURCE_LOW);
+ return CreateLowEntropyProvider();
+}
+
+std::unique_ptr<const base::FieldTrial::EntropyProvider>
+MetricsStateManager::CreateLowEntropyProvider() {
+ const int low_entropy_source_value = GetLowEntropySource();
+
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ return std::unique_ptr<const base::FieldTrial::EntropyProvider>(
+ new variations::CachingPermutedEntropyProvider(
+ local_state_, low_entropy_source_value, kMaxLowEntropySize));
+#else
+ return std::unique_ptr<const base::FieldTrial::EntropyProvider>(
+ new variations::PermutedEntropyProvider(low_entropy_source_value,
+ kMaxLowEntropySize));
+#endif
+}
+
+// static
+std::unique_ptr<MetricsStateManager> MetricsStateManager::Create(
+ PrefService* local_state,
+ EnabledStateProvider* enabled_state_provider,
+ const base::string16& backup_registry_key,
+ const StoreClientInfoCallback& store_client_info,
+ const LoadClientInfoCallback& retrieve_client_info) {
+ std::unique_ptr<MetricsStateManager> result;
+ // Note: |instance_exists_| is updated in the constructor and destructor.
+ if (!instance_exists_) {
+ result.reset(new MetricsStateManager(local_state, enabled_state_provider,
+ backup_registry_key, store_client_info,
+ retrieve_client_info));
+ }
+ return result;
+}
+
+// static
+void MetricsStateManager::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterBooleanPref(prefs::kMetricsResetIds, false);
+ registry->RegisterStringPref(prefs::kMetricsClientID, std::string());
+ registry->RegisterInt64Pref(prefs::kMetricsReportingEnabledTimestamp, 0);
+ registry->RegisterIntegerPref(prefs::kMetricsLowEntropySource,
+ kLowEntropySourceNotSet);
+ registry->RegisterInt64Pref(prefs::kInstallDate, 0);
+
+ ClonedInstallDetector::RegisterPrefs(registry);
+ variations::CachingPermutedEntropyProvider::RegisterPrefs(registry);
+}
+
+void MetricsStateManager::BackUpCurrentClientInfo() {
+ ClientInfo client_info;
+ client_info.client_id = client_id_;
+ client_info.installation_date = ReadInstallDate(local_state_);
+ client_info.reporting_enabled_date = ReadEnabledDate(local_state_);
+ store_client_info_.Run(client_info);
+}
+
+std::unique_ptr<ClientInfo> MetricsStateManager::LoadClientInfo() {
+ std::unique_ptr<ClientInfo> client_info = load_client_info_.Run();
+
+ // The GUID retrieved should be valid unless retrieval failed.
+ // If not, return nullptr. This will result in a new GUID being generated by
+ // the calling function ForceClientIdCreation().
+ if (client_info && !base::IsValidGUID(client_info->client_id))
+ return nullptr;
+
+ return client_info;
+}
+
+int MetricsStateManager::GetLowEntropySource() {
+ UpdateLowEntropySource();
+ return low_entropy_source_;
+}
+
+void MetricsStateManager::UpdateLowEntropySource() {
+ // Note that the default value for the low entropy source and the default pref
+ // value are both kLowEntropySourceNotSet, which is used to identify if the
+ // value has been set or not.
+ if (low_entropy_source_ != kLowEntropySourceNotSet)
+ return;
+
+ const base::CommandLine* command_line(base::CommandLine::ForCurrentProcess());
+ // Only try to load the value from prefs if the user did not request a
+ // reset.
+ // Otherwise, skip to generating a new value.
+ if (!command_line->HasSwitch(switches::kResetVariationState)) {
+ int value = local_state_->GetInteger(prefs::kMetricsLowEntropySource);
+ // If the value is outside the [0, kMaxLowEntropySize) range, re-generate
+ // it below.
+ if (value >= 0 && value < kMaxLowEntropySize) {
+ low_entropy_source_ = value;
+ LogLowEntropyValue(low_entropy_source_);
+ return;
+ }
+ }
+
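+ // No usable value was found in prefs (or a reset was requested): generate a
+ // fresh value, persist it, and drop any cached permuted entropy derived from
+ // the old value.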
+ low_entropy_source_ = GenerateLowEntropySource();
+ LogLowEntropyValue(low_entropy_source_);
+ local_state_->SetInteger(prefs::kMetricsLowEntropySource,
+ low_entropy_source_);
+ variations::CachingPermutedEntropyProvider::ClearCache(local_state_);
+}
+
+void MetricsStateManager::UpdateEntropySourceReturnedValue(
+ EntropySourceType type) {
+ if (entropy_source_returned_ != ENTROPY_SOURCE_NONE)
+ return;
+
+ entropy_source_returned_ = type;
+ UMA_HISTOGRAM_ENUMERATION("UMA.EntropySourceType", type,
+ ENTROPY_SOURCE_ENUM_SIZE);
+}
+
+void MetricsStateManager::ResetMetricsIDsIfNecessary() {
+ if (!local_state_->GetBoolean(prefs::kMetricsResetIds))
+ return;
+ metrics_ids_were_reset_ = true;
+ previous_client_id_ = local_state_->GetString(prefs::kMetricsClientID);
+
+ UMA_HISTOGRAM_BOOLEAN("UMA.MetricsIDsReset", true);
+
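+ // At this point the in-memory client id and low entropy source must not
+ // have been initialized yet for this session.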
+ DCHECK(client_id_.empty());
+ DCHECK_EQ(kLowEntropySourceNotSet, low_entropy_source_);
+
+ local_state_->ClearPref(prefs::kMetricsClientID);
+ local_state_->ClearPref(prefs::kMetricsLowEntropySource);
+ local_state_->ClearPref(prefs::kMetricsResetIds);
+
+ // Also clear the backed up client info.
+ store_client_info_.Run(ClientInfo());
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_state_manager.h b/components/metrics/metrics_state_manager.h
new file mode 100644
index 0000000..a1a15b3
--- /dev/null
+++ b/components/metrics/metrics_state_manager.h
@@ -0,0 +1,217 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_STATE_MANAGER_H_
+#define COMPONENTS_METRICS_METRICS_STATE_MANAGER_H_
+
+#include <memory>
+#include <string>
+
+#include "base/callback.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string16.h"
+#include "components/metrics/clean_exit_beacon.h"
+#include "components/metrics/client_info.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace metrics {
+
+class ClonedInstallDetector;
+class EnabledStateProvider;
+class MetricsProvider;
+
+// Responsible for managing MetricsService state prefs, specifically the UMA
+// client id and low entropy source. Code outside the metrics directory should
+// not be instantiating or using this class directly.
+class MetricsStateManager final {
+ public:
+ // A callback that can be invoked to store client info to persistent storage.
+ // Storing an empty client_id will result in the backup being voided.
+ typedef base::Callback<void(const ClientInfo& client_info)>
+ StoreClientInfoCallback;
+
+ // A callback that can be invoked to load client info stored through the
+ // StoreClientInfoCallback.
+ typedef base::Callback<std::unique_ptr<ClientInfo>(void)>
+ LoadClientInfoCallback;
+
+ ~MetricsStateManager();
+
+ std::unique_ptr<MetricsProvider> GetProvider();
+
+ // Returns true if the user has consented to sending metric reports and there
+ // is no other reason to disable reporting. One such reason is client
+ // sampling, where this client is not part of the sample.
+ bool IsMetricsReportingEnabled();
+
+ // Returns the install date of the application, in seconds since the epoch.
+ int64_t GetInstallDate() const;
+
+ // Returns the client ID for this client, or the empty string if the user is
+ // not opted in to metrics reporting.
+ const std::string& client_id() const { return client_id_; }
+
+ // The CleanExitBeacon, used to determine whether the previous Chrome browser
+ // session terminated gracefully.
+ CleanExitBeacon* clean_exit_beacon() { return &clean_exit_beacon_; }
+ const CleanExitBeacon* clean_exit_beacon() const {
+ return &clean_exit_beacon_;
+ }
+
+ // Forces the client ID to be generated. This is useful in case it's needed
+ // before recording.
+ void ForceClientIdCreation();
+
+ // Checks if this install was cloned or imaged from another machine. If a
+ // clone is detected, resets the client id and low entropy source. This
+ // should not be called more than once.
+ void CheckForClonedInstall();
+
+ // Returns the preferred entropy provider used to seed persistent activities
+ // based on whether or not metrics reporting is permitted on this client.
+ //
+ // If there's consent to report metrics, this method returns an entropy
+ // provider that has a high source of entropy, partially based on the client
+ // ID. Otherwise, it returns an entropy provider that is based on a low
+ // entropy source.
+ std::unique_ptr<const base::FieldTrial::EntropyProvider>
+ CreateDefaultEntropyProvider();
+
+ // Returns an entropy provider that is based on a low entropy source. This
+ // provider is the same type of provider returned by
+ // CreateDefaultEntropyProvider when there's no consent to report metrics, but
+ // will be a new instance.
+ std::unique_ptr<const base::FieldTrial::EntropyProvider>
+ CreateLowEntropyProvider();
+
+ // Creates the MetricsStateManager, enforcing that only a single instance
+ // of the class exists at a time. Returns NULL if an instance exists already.
+ // On Windows, |backup_registry_key| is used to store a backup of the clean
+ // exit beacon. It is ignored on other platforms.
+ static std::unique_ptr<MetricsStateManager> Create(
+ PrefService* local_state,
+ EnabledStateProvider* enabled_state_provider,
+ const base::string16& backup_registry_key,
+ const StoreClientInfoCallback& store_client_info,
+ const LoadClientInfoCallback& load_client_info);
+
+ // Registers local state prefs used by this class.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(MetricsStateManagerTest, CheckProviderResetIds);
+ FRIEND_TEST_ALL_PREFIXES(MetricsStateManagerTest, EntropySourceUsed_Low);
+ FRIEND_TEST_ALL_PREFIXES(MetricsStateManagerTest, EntropySourceUsed_High);
+ FRIEND_TEST_ALL_PREFIXES(MetricsStateManagerTest, LowEntropySource0NotReset);
+ FRIEND_TEST_ALL_PREFIXES(MetricsStateManagerTest,
+ PermutedEntropyCacheClearedWhenLowEntropyReset);
+ FRIEND_TEST_ALL_PREFIXES(MetricsStateManagerTest, ResetBackup);
+ FRIEND_TEST_ALL_PREFIXES(MetricsStateManagerTest, ResetMetricsIDs);
+
+ // Designates which entropy source was returned from this class.
+ // This is used for testing to validate that we return the correct source
+ // depending on the state of the service.
+ enum EntropySourceType {
+ ENTROPY_SOURCE_NONE,
+ ENTROPY_SOURCE_LOW,
+ ENTROPY_SOURCE_HIGH,
+ ENTROPY_SOURCE_ENUM_SIZE,
+ };
+
+ // Creates the MetricsStateManager with the given |local_state|. Uses
+ // |enabled_state_provider| to query whether there is consent for metrics
+ // reporting, and if it is enabled. Clients should instead use Create(), which
+ // enforces that a single instance of this class be alive at any given time.
+ // |store_client_info| should back up client info to persistent storage such
+ // that it is later retrievable by |load_client_info|.
+ MetricsStateManager(PrefService* local_state,
+ EnabledStateProvider* enabled_state_provider,
+ const base::string16& backup_registry_key,
+ const StoreClientInfoCallback& store_client_info,
+ const LoadClientInfoCallback& load_client_info);
+
+ // Backs up the current client info via |store_client_info_|.
+ void BackUpCurrentClientInfo();
+
+ // Loads the client info via |load_client_info_|.
+ std::unique_ptr<ClientInfo> LoadClientInfo();
+
+ // Returns the low entropy source for this client. This is a random value
+ // that is non-identifying amongst browser clients. The value is lazily
+ // initialized (loaded from prefs or freshly generated) on the first call.
+ int GetLowEntropySource();
+
+ // Generates the low entropy source value for this client if it is not
+ // already set.
+ void UpdateLowEntropySource();
+
+ // Updates |entropy_source_returned_| with |type| iff the current value is
+ // ENTROPY_SOURCE_NONE and logs the new value in a histogram.
+ void UpdateEntropySourceReturnedValue(EntropySourceType type);
+
+ // Returns the first entropy source that was returned by this service since
+ // start up, or NONE if neither was returned yet. This is exposed for testing
+ // only.
+ EntropySourceType entropy_source_returned() const {
+ return entropy_source_returned_;
+ }
+
+ // Reset the client id and low entropy source if the kMetricsResetIds
+ // pref is true.
+ void ResetMetricsIDsIfNecessary();
+
+ // Whether an instance of this class exists. Used to enforce that there aren't
+ // multiple instances of this class at a given time.
+ static bool instance_exists_;
+
+ // Weak pointer to the local state prefs store.
+ PrefService* const local_state_;
+
+ // Weak pointer to an enabled state provider. Used to know whether the user
+ // has consented to reporting, and if reporting should be done.
+ EnabledStateProvider* enabled_state_provider_;
+
+ // A callback run during client id creation so this MetricsStateManager can
+ // store a backup of the newly generated ID.
+ const StoreClientInfoCallback store_client_info_;
+
+ // A callback run if this MetricsStateManager can't get the client id from
+ // its typical location and wants to attempt loading it from this backup.
+ const LoadClientInfoCallback load_client_info_;
+
+ // A beacon used to determine whether the previous Chrome browser session
+ // terminated gracefully.
+ CleanExitBeacon clean_exit_beacon_;
+
+ // The identifier that's sent to the server with the log reports.
+ std::string client_id_;
+
+ // The non-identifying low entropy source value.
+ int low_entropy_source_;
+
+ // The last entropy source returned by this service, used for testing.
+ EntropySourceType entropy_source_returned_;
+
+ // The value of prefs::kMetricsResetIds seen upon startup, i.e., the value
+ // that was appropriate in the previous session. Used when reporting previous
+ // session (stability) data.
+ bool metrics_ids_were_reset_;
+
+ // The value of the metrics id before resetting. Only possibly valid if the
+ // metrics id was reset. May be blank if the metrics id was reset but Chrome
+ // has no record of what the previous metrics id was.
+ std::string previous_client_id_;
+
+ std::unique_ptr<ClonedInstallDetector> cloned_install_detector_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsStateManager);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_STATE_MANAGER_H_
diff --git a/components/metrics/metrics_state_manager_unittest.cc b/components/metrics/metrics_state_manager_unittest.cc
new file mode 100644
index 0000000..0e1f148
--- /dev/null
+++ b/components/metrics/metrics_state_manager_unittest.cc
@@ -0,0 +1,465 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_state_manager.h"
+
+#include <ctype.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "components/metrics/client_info.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/metrics/metrics_service.h"
+#include "components/metrics/metrics_switches.h"
+#include "components/metrics/test_enabled_state_provider.h"
+#include "components/prefs/testing_pref_service.h"
+#include "components/variations/caching_permuted_entropy_provider.h"
+#include "components/variations/pref_names.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+class MetricsStateManagerTest : public testing::Test {
+ public:
+ MetricsStateManagerTest()
+ : test_begin_time_(base::Time::Now().ToTimeT()),
+ enabled_state_provider_(new TestEnabledStateProvider(false, false)) {
+ MetricsService::RegisterPrefs(prefs_.registry());
+ }
+
+ std::unique_ptr<MetricsStateManager> CreateStateManager() {
+ return MetricsStateManager::Create(
+ &prefs_, enabled_state_provider_.get(), base::string16(),
+ base::Bind(&MetricsStateManagerTest::MockStoreClientInfoBackup,
+ base::Unretained(this)),
+ base::Bind(&MetricsStateManagerTest::LoadFakeClientInfoBackup,
+ base::Unretained(this)));
+ }
+
+ // Sets metrics reporting as enabled for testing.
+ void EnableMetricsReporting() {
+ enabled_state_provider_->set_consent(true);
+ enabled_state_provider_->set_enabled(true);
+ }
+
+ void SetClientInfoPrefs(const ClientInfo& client_info) {
+ prefs_.SetString(prefs::kMetricsClientID, client_info.client_id);
+ prefs_.SetInt64(prefs::kInstallDate, client_info.installation_date);
+ prefs_.SetInt64(prefs::kMetricsReportingEnabledTimestamp,
+ client_info.reporting_enabled_date);
+ }
+
+ void SetFakeClientInfoBackup(const ClientInfo& client_info) {
+ fake_client_info_backup_.reset(new ClientInfo);
+ fake_client_info_backup_->client_id = client_info.client_id;
+ fake_client_info_backup_->installation_date = client_info.installation_date;
+ fake_client_info_backup_->reporting_enabled_date =
+ client_info.reporting_enabled_date;
+ }
+
+ protected:
+ TestingPrefServiceSimple prefs_;
+
+ // Last ClientInfo stored by the MetricsStateManager via
+ // MockStoreClientInfoBackup.
+ std::unique_ptr<ClientInfo> stored_client_info_backup_;
+
+ // If set, will be returned via LoadFakeClientInfoBackup if requested by the
+ // MetricsStateManager.
+ std::unique_ptr<ClientInfo> fake_client_info_backup_;
+
+ const int64_t test_begin_time_;
+
+ private:
+ // Stores the |client_info| in |stored_client_info_backup_| for verification
+ // by the tests later.
+ void MockStoreClientInfoBackup(const ClientInfo& client_info) {
+ stored_client_info_backup_.reset(new ClientInfo);
+ stored_client_info_backup_->client_id = client_info.client_id;
+ stored_client_info_backup_->installation_date =
+ client_info.installation_date;
+ stored_client_info_backup_->reporting_enabled_date =
+ client_info.reporting_enabled_date;
+
+ // Respect the contract that storing an empty client_id voids the existing
+ // backup (required for the last section of the ForceClientIdCreation test
+ // below).
+ if (client_info.client_id.empty())
+ fake_client_info_backup_.reset();
+ }
+
+ // Hands out a copy of |fake_client_info_backup_| if it is set.
+ std::unique_ptr<ClientInfo> LoadFakeClientInfoBackup() {
+ if (!fake_client_info_backup_)
+ return std::unique_ptr<ClientInfo>();
+
+ std::unique_ptr<ClientInfo> backup_copy(new ClientInfo);
+ backup_copy->client_id = fake_client_info_backup_->client_id;
+ backup_copy->installation_date =
+ fake_client_info_backup_->installation_date;
+ backup_copy->reporting_enabled_date =
+ fake_client_info_backup_->reporting_enabled_date;
+ return backup_copy;
+ }
+
+ std::unique_ptr<TestEnabledStateProvider> enabled_state_provider_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsStateManagerTest);
+};
+
+// Ensure the ClientId is formatted as expected.
+TEST_F(MetricsStateManagerTest, ClientIdCorrectlyFormatted) {
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ state_manager->ForceClientIdCreation();
+
+ const std::string client_id = state_manager->client_id();
+ EXPECT_EQ(36U, client_id.length());
+
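+ // A GUID is formatted as 8-4-4-4-12 hexadecimal digits separated by hyphens.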
+ for (size_t i = 0; i < client_id.length(); ++i) {
+ char current = client_id[i];
+ if (i == 8 || i == 13 || i == 18 || i == 23)
+ EXPECT_EQ('-', current);
+ else
+ EXPECT_TRUE(isxdigit(current));
+ }
+}
+
+TEST_F(MetricsStateManagerTest, EntropySourceUsed_Low) {
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ state_manager->CreateDefaultEntropyProvider();
+ EXPECT_EQ(MetricsStateManager::ENTROPY_SOURCE_LOW,
+ state_manager->entropy_source_returned());
+}
+
+TEST_F(MetricsStateManagerTest, EntropySourceUsed_High) {
+ EnableMetricsReporting();
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ state_manager->CreateDefaultEntropyProvider();
+ EXPECT_EQ(MetricsStateManager::ENTROPY_SOURCE_HIGH,
+ state_manager->entropy_source_returned());
+}
+
+TEST_F(MetricsStateManagerTest, LowEntropySource0NotReset) {
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+
+ // Get the low entropy source once, to initialize it.
+ state_manager->GetLowEntropySource();
+
+ // Now, set it to 0 and ensure it doesn't get reset.
+ state_manager->low_entropy_source_ = 0;
+ EXPECT_EQ(0, state_manager->GetLowEntropySource());
+ // Call it another time, just to make sure.
+ EXPECT_EQ(0, state_manager->GetLowEntropySource());
+}
+
+TEST_F(MetricsStateManagerTest,
+ PermutedEntropyCacheClearedWhenLowEntropyReset) {
+ const PrefService::Preference* low_entropy_pref =
+ prefs_.FindPreference(prefs::kMetricsLowEntropySource);
+ const char* kCachePrefName =
+ variations::prefs::kVariationsPermutedEntropyCache;
+ int low_entropy_value = -1;
+
+ // First, generate an initial low entropy source value.
+ {
+ EXPECT_TRUE(low_entropy_pref->IsDefaultValue());
+
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ state_manager->GetLowEntropySource();
+
+ EXPECT_FALSE(low_entropy_pref->IsDefaultValue());
+ EXPECT_TRUE(low_entropy_pref->GetValue()->GetAsInteger(&low_entropy_value));
+ }
+
+ // Now, set a dummy value in the permuted entropy cache pref and verify that
+ // another call to GetLowEntropySource() doesn't clobber it when
+ // --reset-variation-state wasn't specified.
+ {
+ prefs_.SetString(kCachePrefName, "test");
+
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ state_manager->GetLowEntropySource();
+
+ EXPECT_EQ("test", prefs_.GetString(kCachePrefName));
+ EXPECT_EQ(low_entropy_value,
+ prefs_.GetInteger(prefs::kMetricsLowEntropySource));
+ }
+
+ // Verify that the cache does get reset if --reset-variations-state is passed.
+ {
+ base::CommandLine::ForCurrentProcess()->AppendSwitch(
+ switches::kResetVariationState);
+
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ state_manager->GetLowEntropySource();
+
+ EXPECT_TRUE(prefs_.GetString(kCachePrefName).empty());
+ }
+}
+
+// Check that setting the kMetricsResetIds pref to true causes the client id to
+// be reset. We do not check that the low entropy source is reset because we
+// cannot ensure that the metrics state manager won't generate the same value
+// again.
+TEST_F(MetricsStateManagerTest, ResetMetricsIDs) {
+ // Set an initial client id in prefs. It should not be possible for the
+ // metrics state manager to generate this id randomly.
+ const std::string kInitialClientId = "initial client id";
+ prefs_.SetString(prefs::kMetricsClientID, kInitialClientId);
+
+ // Make sure the initial client id isn't reset by the metrics state manager.
+ {
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ state_manager->ForceClientIdCreation();
+ EXPECT_EQ(kInitialClientId, state_manager->client_id());
+ EXPECT_FALSE(state_manager->metrics_ids_were_reset_);
+ }
+
+ // Set the reset pref to cause the IDs to be reset.
+ prefs_.SetBoolean(prefs::kMetricsResetIds, true);
+
+ // Cause the actual reset to happen.
+ {
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ state_manager->ForceClientIdCreation();
+ EXPECT_NE(kInitialClientId, state_manager->client_id());
+ EXPECT_TRUE(state_manager->metrics_ids_were_reset_);
+ EXPECT_EQ(kInitialClientId, state_manager->previous_client_id_);
+
+ state_manager->GetLowEntropySource();
+
+ EXPECT_FALSE(prefs_.GetBoolean(prefs::kMetricsResetIds));
+ }
+
+ EXPECT_NE(kInitialClientId, prefs_.GetString(prefs::kMetricsClientID));
+}
+
+TEST_F(MetricsStateManagerTest, ForceClientIdCreation) {
+ const int64_t kFakeInstallationDate = 12345;
+ prefs_.SetInt64(prefs::kInstallDate, kFakeInstallationDate);
+
+ {
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+
+ // client_id shouldn't be auto-generated if metrics reporting is not
+ // enabled.
+ EXPECT_EQ(std::string(), state_manager->client_id());
+ EXPECT_EQ(0, prefs_.GetInt64(prefs::kMetricsReportingEnabledTimestamp));
+
+ // Confirm that the initial ForceClientIdCreation call creates the client id
+ // and backs it up via MockStoreClientInfoBackup.
+ EXPECT_FALSE(stored_client_info_backup_);
+ state_manager->ForceClientIdCreation();
+ EXPECT_NE(std::string(), state_manager->client_id());
+ EXPECT_GE(prefs_.GetInt64(prefs::kMetricsReportingEnabledTimestamp),
+ test_begin_time_);
+
+ ASSERT_TRUE(stored_client_info_backup_);
+ EXPECT_EQ(state_manager->client_id(),
+ stored_client_info_backup_->client_id);
+ EXPECT_EQ(kFakeInstallationDate,
+ stored_client_info_backup_->installation_date);
+ EXPECT_EQ(prefs_.GetInt64(prefs::kMetricsReportingEnabledTimestamp),
+ stored_client_info_backup_->reporting_enabled_date);
+ }
+}
+
+TEST_F(MetricsStateManagerTest, LoadPrefs) {
+ ClientInfo client_info;
+ client_info.client_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEF";
+ client_info.installation_date = 1112;
+ client_info.reporting_enabled_date = 2223;
+ SetClientInfoPrefs(client_info);
+
+ EnableMetricsReporting();
+ {
+ EXPECT_FALSE(fake_client_info_backup_);
+ EXPECT_FALSE(stored_client_info_backup_);
+
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+
+ // The client_id should be loaded automatically by the constructor when
+ // metrics reporting is enabled.
+ EXPECT_EQ(client_info.client_id, state_manager->client_id());
+
+ // The backup should not be modified.
+ ASSERT_FALSE(stored_client_info_backup_);
+
+ // Re-forcing client id creation shouldn't cause another backup and
+ // shouldn't affect the existing client id.
+ state_manager->ForceClientIdCreation();
+ EXPECT_FALSE(stored_client_info_backup_);
+ EXPECT_EQ(client_info.client_id, state_manager->client_id());
+ }
+}
+
+TEST_F(MetricsStateManagerTest, PreferPrefs) {
+ ClientInfo client_info;
+ client_info.client_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEF";
+ client_info.installation_date = 1112;
+ client_info.reporting_enabled_date = 2223;
+ SetClientInfoPrefs(client_info);
+
+ ClientInfo client_info2;
+ client_info2.client_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE";
+ client_info2.installation_date = 1111;
+ client_info2.reporting_enabled_date = 2222;
+ SetFakeClientInfoBackup(client_info2);
+
+ EnableMetricsReporting();
+ {
+ // The backup should be ignored if we already have a client id.
+
+ EXPECT_FALSE(stored_client_info_backup_);
+
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ EXPECT_EQ(client_info.client_id, state_manager->client_id());
+
+ // The backup should not be modified.
+ ASSERT_FALSE(stored_client_info_backup_);
+ }
+}
+
+TEST_F(MetricsStateManagerTest, RestoreBackup) {
+ ClientInfo client_info;
+ client_info.client_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEF";
+ client_info.installation_date = 1112;
+ client_info.reporting_enabled_date = 2223;
+ SetClientInfoPrefs(client_info);
+
+ ClientInfo client_info2;
+ client_info2.client_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE";
+ client_info2.installation_date = 1111;
+ client_info2.reporting_enabled_date = 2222;
+ SetFakeClientInfoBackup(client_info2);
+
+ prefs_.ClearPref(prefs::kMetricsClientID);
+ prefs_.ClearPref(prefs::kMetricsReportingEnabledTimestamp);
+
+ EnableMetricsReporting();
+ {
+ // The backup should kick in if the client id has gone missing. It should
+ // replace both the remaining and the missing dates as well.
+
+ EXPECT_FALSE(stored_client_info_backup_);
+
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ EXPECT_EQ(client_info2.client_id, state_manager->client_id());
+ EXPECT_EQ(client_info2.installation_date,
+ prefs_.GetInt64(prefs::kInstallDate));
+ EXPECT_EQ(client_info2.reporting_enabled_date,
+ prefs_.GetInt64(prefs::kMetricsReportingEnabledTimestamp));
+
+ EXPECT_TRUE(stored_client_info_backup_);
+ }
+}
+
+TEST_F(MetricsStateManagerTest, ResetBackup) {
+ ClientInfo client_info;
+ client_info.client_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE";
+ client_info.installation_date = 1111;
+ client_info.reporting_enabled_date = 2222;
+
+ SetFakeClientInfoBackup(client_info);
+ SetClientInfoPrefs(client_info);
+
+ prefs_.SetBoolean(prefs::kMetricsResetIds, true);
+
+ EnableMetricsReporting();
+ {
+ // Upon request to reset metrics ids, the existing backup should not be
+ // restored.
+
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+
+ // A brand new client id should have been generated.
+ EXPECT_NE(std::string(), state_manager->client_id());
+ EXPECT_NE(client_info.client_id, state_manager->client_id());
+ EXPECT_TRUE(state_manager->metrics_ids_were_reset_);
+ EXPECT_EQ(client_info.client_id, state_manager->previous_client_id_);
+ EXPECT_TRUE(stored_client_info_backup_);
+
+ // The installation date should not have been affected.
+ EXPECT_EQ(client_info.installation_date,
+ prefs_.GetInt64(prefs::kInstallDate));
+
+ // The metrics-reporting-enabled date will be reset to Now().
+ EXPECT_GE(prefs_.GetInt64(prefs::kMetricsReportingEnabledTimestamp),
+ test_begin_time_);
+ }
+}
+
+TEST_F(MetricsStateManagerTest, CheckProvider) {
+ int64_t kInstallDate = 1373051956;
+ int64_t kInstallDateExpected = 1373050800; // Computed from kInstallDate.
+ int64_t kEnabledDate = 1373001211;
+ int64_t kEnabledDateExpected = 1373000400; // Computed from kEnabledDate.
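+ // The expected values above correspond to the original dates truncated to
+ // the hour (multiples of 3600 seconds).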
+
+ ClientInfo client_info;
+ client_info.client_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE";
+ client_info.installation_date = kInstallDate;
+ client_info.reporting_enabled_date = kEnabledDate;
+
+ SetFakeClientInfoBackup(client_info);
+ SetClientInfoPrefs(client_info);
+
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ std::unique_ptr<MetricsProvider> provider = state_manager->GetProvider();
+ SystemProfileProto system_profile;
+ provider->ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(system_profile.install_date(), kInstallDateExpected);
+ EXPECT_EQ(system_profile.uma_enabled_date(), kEnabledDateExpected);
+
+ base::HistogramTester histogram_tester;
+ ChromeUserMetricsExtension uma_proto;
+ provider->ProvidePreviousSessionData(&uma_proto);
+ // The client_id field in the proto should not be overwritten.
+ EXPECT_FALSE(uma_proto.has_client_id());
+ // Nothing should have been emitted to the cloned install histogram.
+ histogram_tester.ExpectTotalCount("UMA.IsClonedInstall", 0);
+}
+
+TEST_F(MetricsStateManagerTest, CheckProviderResetIds) {
+ int64_t kInstallDate = 1373051956;
+ int64_t kInstallDateExpected = 1373050800; // Computed from kInstallDate.
+ int64_t kEnabledDate = 1373001211;
+ int64_t kEnabledDateExpected = 1373000400; // Computed from kEnabledDate.
+
+ ClientInfo client_info;
+ client_info.client_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE";
+ client_info.installation_date = kInstallDate;
+ client_info.reporting_enabled_date = kEnabledDate;
+
+ SetFakeClientInfoBackup(client_info);
+ SetClientInfoPrefs(client_info);
+
+ // Set the reset pref to cause the IDs to be reset.
+ prefs_.SetBoolean(prefs::kMetricsResetIds, true);
+ std::unique_ptr<MetricsStateManager> state_manager(CreateStateManager());
+ EXPECT_NE(client_info.client_id, state_manager->client_id());
+ EXPECT_TRUE(state_manager->metrics_ids_were_reset_);
+ EXPECT_EQ(client_info.client_id, state_manager->previous_client_id_);
+
+ std::unique_ptr<MetricsProvider> provider = state_manager->GetProvider();
+ SystemProfileProto system_profile;
+ provider->ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(system_profile.install_date(), kInstallDateExpected);
+ EXPECT_EQ(system_profile.uma_enabled_date(), kEnabledDateExpected);
+
+ base::HistogramTester histogram_tester;
+ ChromeUserMetricsExtension uma_proto;
+ provider->ProvidePreviousSessionData(&uma_proto);
+ EXPECT_EQ(MetricsLog::Hash(state_manager->previous_client_id_),
+ uma_proto.client_id());
+ histogram_tester.ExpectUniqueSample("UMA.IsClonedInstall", 1, 1);
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_switches.cc b/components/metrics/metrics_switches.cc
new file mode 100644
index 0000000..38f65ae
--- /dev/null
+++ b/components/metrics/metrics_switches.cc
@@ -0,0 +1,25 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_switches.h"
+
+namespace metrics {
+namespace switches {
+
+// Enables the recording of metrics reports but disables reporting. In contrast
+// to kDisableMetrics, this executes all the code that a normal client would
+// use for reporting, except the report is dropped rather than sent to the
+// server. This is useful for finding issues in the metrics code during UI and
+// performance tests.
+const char kMetricsRecordingOnly[] = "metrics-recording-only";
+
+// Forces a reset of the one-time-randomized FieldTrials on this client, also
+// known as the Chrome Variations state.
+const char kResetVariationState[] = "reset-variation-state";
+
+// Forces metrics reporting to be enabled.
+const char kForceEnableMetricsReporting[] = "force-enable-metrics-reporting";
+
+} // namespace switches
+} // namespace metrics
diff --git a/components/metrics/metrics_switches.h b/components/metrics/metrics_switches.h
new file mode 100644
index 0000000..10e41e9
--- /dev/null
+++ b/components/metrics/metrics_switches.h
@@ -0,0 +1,21 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_SWITCHES_H_
+#define COMPONENTS_METRICS_METRICS_SWITCHES_H_
+
+namespace metrics {
+namespace switches {
+
+// Alphabetical list of switches specific to the metrics component. Document
+// each in the .cc file.
+
+extern const char kForceEnableMetricsReporting[];
+extern const char kMetricsRecordingOnly[];
+extern const char kResetVariationState[];
+
+} // namespace switches
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_SWITCHES_H_
diff --git a/components/metrics/metrics_upload_scheduler.cc b/components/metrics/metrics_upload_scheduler.cc
new file mode 100644
index 0000000..b7b7364
--- /dev/null
+++ b/components/metrics/metrics_upload_scheduler.cc
@@ -0,0 +1,92 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/metrics_upload_scheduler.h"
+
+#include <stdint.h>
+
+#include "base/feature_list.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
+#include "components/metrics/metrics_scheduler.h"
+
+namespace metrics {
+
+namespace {
+
+// When uploading metrics to the server fails, we progressively wait longer and
+// longer before sending the next log. This backoff process helps reduce load
+// on a server that is having issues.
+// The following is the multiplier we use to expand that inter-log duration.
+const double kBackoffMultiplier = 2;
+
+// The maximum backoff interval in hours.
+const int kMaxBackoffIntervalHours = 24;
+
+// Minutes to wait if we are unable to upload due to data usage cap.
+const int kOverDataUsageIntervalMinutes = 5;
+
+// Increases the upload interval each time it's called, to handle the case
+// where the server is having issues.
+base::TimeDelta BackOffUploadInterval(base::TimeDelta interval) {
+ DCHECK_GT(kBackoffMultiplier, 1.0);
+ interval = base::TimeDelta::FromMicroseconds(static_cast<int64_t>(
+ kBackoffMultiplier * interval.InMicroseconds()));
+
+ base::TimeDelta max_interval =
+ base::TimeDelta::FromHours(kMaxBackoffIntervalHours);
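+ // A negative interval indicates overflow of the multiplication above; treat
+ // it like exceeding the maximum.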
+ if (interval > max_interval || interval.InSeconds() < 0) {
+ interval = max_interval;
+ }
+ return interval;
+}
+
+// Time delay after a log is uploaded successfully before attempting another.
+// On mobile, keeping the radio on is very expensive, so prefer to keep this
+// short and send in bursts.
+base::TimeDelta GetUnsentLogsInterval() {
+ return base::TimeDelta::FromSeconds(3);
+}
+
+// Initial time delay after a log upload fails before retrying it.
+base::TimeDelta GetInitialBackoffInterval() {
+ return base::TimeDelta::FromMinutes(5);
+}
+
+} // namespace
+
+MetricsUploadScheduler::MetricsUploadScheduler(
+ const base::Closure& upload_callback)
+ : MetricsScheduler(upload_callback),
+ unsent_logs_interval_(GetUnsentLogsInterval()),
+ initial_backoff_interval_(GetInitialBackoffInterval()),
+ backoff_interval_(initial_backoff_interval_) {}
+
+MetricsUploadScheduler::~MetricsUploadScheduler() {}
+
+void MetricsUploadScheduler::UploadFinished(bool server_is_healthy) {
+ // If the server is having issues, back off. Otherwise, reset to default
+ // (unless there are more logs to send, in which case the next upload should
+ // happen sooner).
+ if (!server_is_healthy) {
+ TaskDone(backoff_interval_);
+ backoff_interval_ = BackOffUploadInterval(backoff_interval_);
+ } else {
+ backoff_interval_ = initial_backoff_interval_;
+ TaskDone(unsent_logs_interval_);
+ }
+}
+
+void MetricsUploadScheduler::StopAndUploadCancelled() {
+ Stop();
+ TaskDone(unsent_logs_interval_);
+}
+
+void MetricsUploadScheduler::UploadOverDataUsageCap() {
+ TaskDone(base::TimeDelta::FromMinutes(kOverDataUsageIntervalMinutes));
+}
+
+} // namespace metrics
diff --git a/components/metrics/metrics_upload_scheduler.h b/components/metrics/metrics_upload_scheduler.h
new file mode 100644
index 0000000..a97b7cb
--- /dev/null
+++ b/components/metrics/metrics_upload_scheduler.h
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_METRICS_UPLOAD_SCHEDULER_H_
+#define COMPONENTS_METRICS_METRICS_UPLOAD_SCHEDULER_H_
+
+#include "base/callback.h"
+#include "base/feature_list.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_scheduler.h"
+
+namespace metrics {
+
+// Scheduler task to drive a ReportingService object's uploading.
+class MetricsUploadScheduler : public MetricsScheduler {
+ public:
+ // Creates a MetricsUploadScheduler with the given |upload_callback| to call
+ // when an upload should happen. The callback must arrange to call either
+ // UploadFinished() or StopAndUploadCancelled() on completion.
+ explicit MetricsUploadScheduler(const base::Closure& upload_callback);
+ ~MetricsUploadScheduler() override;
+
+ // Callback from MetricsService when a triggered upload finishes.
+ void UploadFinished(bool server_is_healthy);
+
+ // Callback from MetricsService when an upload is cancelled.
+ // Also stops the scheduler.
+ void StopAndUploadCancelled();
+
+ // Callback from MetricsService when an upload is cancelled because it would
+ // be over the allowed data usage cap.
+ void UploadOverDataUsageCap();
+
+ private:
+ // Time to wait between uploads on success.
+ const base::TimeDelta unsent_logs_interval_;
+
+ // Initial time to wait between upload retry attempts.
+ const base::TimeDelta initial_backoff_interval_;
+
+ // Time to wait before the next upload attempt if the current one fails.
+ base::TimeDelta backoff_interval_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsUploadScheduler);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_METRICS_UPLOAD_SCHEDULER_H_
diff --git a/components/metrics/net/DEPS b/components/metrics/net/DEPS
new file mode 100644
index 0000000..633e4af
--- /dev/null
+++ b/components/metrics/net/DEPS
@@ -0,0 +1,11 @@
+include_rules = [
+ "+chromeos/dbus",
+ "+chromeos/network",
+ "+components/data_use_measurement/core",
+ "+components/encrypted_messages",
+ "+components/variations",
+ "+net",
+ "+services/network/public/cpp",
+ "+services/network/test",
+ "+third_party/cros_system_api",
+]
diff --git a/components/metrics/net/cellular_logic_helper.cc b/components/metrics/net/cellular_logic_helper.cc
new file mode 100644
index 0000000..33b351e
--- /dev/null
+++ b/components/metrics/net/cellular_logic_helper.cc
@@ -0,0 +1,47 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/net/cellular_logic_helper.h"
+
+#include "net/base/network_change_notifier.h"
+
+namespace metrics {
+
+namespace {
+
+// Standard interval between log uploads, in seconds.
+#if defined(OS_ANDROID) || defined(OS_IOS)
+const int kStandardUploadIntervalSeconds = 5 * 60; // Five minutes.
+const int kStandardUploadIntervalCellularSeconds = 15 * 60; // Fifteen minutes.
+#else
+const int kStandardUploadIntervalSeconds = 30 * 60; // Thirty minutes.
+#endif
+
+#if defined(OS_ANDROID)
+const bool kDefaultCellularLogicEnabled = true;
+#else
+const bool kDefaultCellularLogicEnabled = false;
+#endif
+
+} // namespace
+
+base::TimeDelta GetUploadInterval() {
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ if (IsCellularLogicEnabled())
+ return base::TimeDelta::FromSeconds(kStandardUploadIntervalCellularSeconds);
+#endif
+ return base::TimeDelta::FromSeconds(kStandardUploadIntervalSeconds);
+}
+
+// Returns true if current connection type is cellular and cellular logic is
+// enabled.
+bool IsCellularLogicEnabled() {
+ if (!kDefaultCellularLogicEnabled)
+ return false;
+
+ return net::NetworkChangeNotifier::IsConnectionCellular(
+ net::NetworkChangeNotifier::GetConnectionType());
+}
+
+} // namespace metrics
diff --git a/components/metrics/net/cellular_logic_helper.h b/components/metrics/net/cellular_logic_helper.h
new file mode 100644
index 0000000..b5c7061
--- /dev/null
+++ b/components/metrics/net/cellular_logic_helper.h
@@ -0,0 +1,21 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_NET_CELLULAR_LOGIC_HELPER_H_
+#define COMPONENTS_METRICS_NET_CELLULAR_LOGIC_HELPER_H_
+
+#include "base/time/time.h"
+
+namespace metrics {
+
+// Returns UMA log upload interval based on OS and ongoing cellular experiment.
+base::TimeDelta GetUploadInterval();
+
+// Returns true if the current connection type is cellular and the user is
+// assigned to the experimental group for enabled cellular uploads.
+bool IsCellularLogicEnabled();
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_NET_CELLULAR_LOGIC_HELPER_H_
diff --git a/components/metrics/net/net_metrics_log_uploader.cc b/components/metrics/net/net_metrics_log_uploader.cc
new file mode 100644
index 0000000..0d937ce
--- /dev/null
+++ b/components/metrics/net/net_metrics_log_uploader.cc
@@ -0,0 +1,295 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/net/net_metrics_log_uploader.h"
+
+#include "base/base64.h"
+#include "base/feature_list.h"
+#include "base/metrics/histogram_macros.h"
+#include "components/data_use_measurement/core/data_use_user_data.h"
+#include "components/encrypted_messages/encrypted_message.pb.h"
+#include "components/encrypted_messages/message_encrypter.h"
+#include "components/metrics/metrics_log_uploader.h"
+#include "net/base/load_flags.h"
+#include "net/base/url_util.h"
+#include "net/traffic_annotation/network_traffic_annotation.h"
+#include "net/url_request/url_fetcher.h"
+#include "services/network/public/cpp/shared_url_loader_factory.h"
+#include "services/network/public/cpp/simple_url_loader.h"
+#include "third_party/metrics_proto/reporting_info.pb.h"
+#include "url/gurl.h"
+
+namespace {
+
+const base::Feature kHttpRetryFeature{"UMAHttpRetry",
+ base::FEATURE_ENABLED_BY_DEFAULT};
+
+// Constants used for encrypting logs that are sent over HTTP. The
+// corresponding private key is used by the metrics server to decrypt logs.
+const char kEncryptedMessageLabel[] = "metrics log";
+
+const uint8_t kServerPublicKey[] = {
+ 0x51, 0xcc, 0x52, 0x67, 0x42, 0x47, 0x3b, 0x10, 0xe8, 0x63, 0x18,
+ 0x3c, 0x61, 0xa7, 0x96, 0x76, 0x86, 0x91, 0x40, 0x71, 0x39, 0x5f,
+ 0x31, 0x1a, 0x39, 0x5b, 0x76, 0xb1, 0x6b, 0x3d, 0x6a, 0x2b};
+
+const uint32_t kServerPublicKeyVersion = 1;
+
+net::NetworkTrafficAnnotationTag GetNetworkTrafficAnnotation(
+ const metrics::MetricsLogUploader::MetricServiceType& service_type) {
+ // Keep the explicit per-service-type branches below so that a default case
+ // without a meaningful annotation is never needed.
+ if (service_type == metrics::MetricsLogUploader::UMA) {
+ return net::DefineNetworkTrafficAnnotation("metrics_report_uma", R"(
+ semantics {
+ sender: "Metrics UMA Log Uploader"
+ description:
+ "Report of usage statistics and crash-related data about Chromium. "
+ "Usage statistics contain information such as preferences, button "
+ "clicks, and memory usage and do not include web page URLs or "
+ "personal information. See more at "
+ "https://www.google.com/chrome/browser/privacy/ under 'Usage "
+ "statistics and crash reports'. Usage statistics are tied to a "
+ "pseudonymous machine identifier and not to your email address."
+ trigger:
+ "Reports are automatically generated on startup and at intervals "
+ "while Chromium is running."
+ data:
+ "A protocol buffer with usage statistics and crash related data."
+ destination: GOOGLE_OWNED_SERVICE
+ }
+ policy {
+ cookies_allowed: NO
+ setting:
+ "Users can enable or disable this feature by disabling "
+ "'Automatically send usage statistics and crash reports to Google' "
+ "in Chromium's settings under Advanced Settings, Privacy. The "
+ "feature is enabled by default."
+ chrome_policy {
+ MetricsReportingEnabled {
+ policy_options {mode: MANDATORY}
+ MetricsReportingEnabled: false
+ }
+ }
+ })");
+ }
+ DCHECK_EQ(service_type, metrics::MetricsLogUploader::UKM);
+ return net::DefineNetworkTrafficAnnotation("metrics_report_ukm", R"(
+ semantics {
+ sender: "Metrics UKM Log Uploader"
+ description:
+ "Report of usage statistics that are keyed by URLs to Chromium, "
+ "sent only if the profile has History Sync. This includes "
+ "information about the web pages you visit and your usage of them, "
+ "such as page load speed. This will also include URLs and "
+ "statistics related to downloaded files. If Extension Sync is "
+ "enabled, these statistics will also include information about "
+ "the extensions that have been installed from Chrome Web Store. "
+ "Google only stores usage statistics associated with published "
+ "extensions, and URLs that are known by Google’s search index. "
+ "Usage statistics are tied to a pseudonymous machine identifier "
+ "and not to your email address."
+ trigger:
+ "Reports are automatically generated on startup and at intervals "
+ "while Chromium is running with Sync enabled."
+ data:
+ "A protocol buffer with usage statistics and associated URLs."
+ destination: GOOGLE_OWNED_SERVICE
+ }
+ policy {
+ cookies_allowed: NO
+ setting:
+ "Users can enable or disable this feature by disabling "
+ "'Automatically send usage statistics and crash reports to Google' "
+ "in Chromium's settings under Advanced Settings, Privacy. This is "
+ "only enabled if all active profiles have History/Extension Sync "
+ "enabled without a Sync passphrase."
+ chrome_policy {
+ MetricsReportingEnabled {
+ policy_options {mode: MANDATORY}
+ MetricsReportingEnabled: false
+ }
+ }
+ })");
+}
+
+std::string SerializeReportingInfo(
+ const metrics::ReportingInfo& reporting_info) {
+ std::string result;
+ std::string bytes;
+ bool success = reporting_info.SerializeToString(&bytes);
+ DCHECK(success);
+ base::Base64Encode(bytes, &result);
+ return result;
+}
+
+void RecordUploadSizeForServiceTypeHistograms(
+ int64_t content_length,
+ metrics::MetricsLogUploader::MetricServiceType service_type) {
+ switch (service_type) {
+ case metrics::MetricsLogUploader::UMA:
+ UMA_HISTOGRAM_COUNTS_1M("UMA.LogUploader.UploadSize", content_length);
+ break;
+ case metrics::MetricsLogUploader::UKM:
+ UMA_HISTOGRAM_COUNTS_1M("UKM.LogUploader.UploadSize", content_length);
+ break;
+ }
+}
+
+} // namespace
+
+namespace metrics {
+
+NetMetricsLogUploader::NetMetricsLogUploader(
+ scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory,
+ base::StringPiece server_url,
+ base::StringPiece mime_type,
+ MetricsLogUploader::MetricServiceType service_type,
+ const MetricsLogUploader::UploadCallback& on_upload_complete)
+ : url_loader_factory_(std::move(url_loader_factory)),
+ server_url_(server_url),
+ mime_type_(mime_type.data(), mime_type.size()),
+ service_type_(service_type),
+ on_upload_complete_(on_upload_complete) {}
+
+NetMetricsLogUploader::NetMetricsLogUploader(
+ scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory,
+ base::StringPiece server_url,
+ base::StringPiece insecure_server_url,
+ base::StringPiece mime_type,
+ MetricsLogUploader::MetricServiceType service_type,
+ const MetricsLogUploader::UploadCallback& on_upload_complete)
+ : url_loader_factory_(std::move(url_loader_factory)),
+ server_url_(server_url),
+ insecure_server_url_(insecure_server_url),
+ mime_type_(mime_type.data(), mime_type.size()),
+ service_type_(service_type),
+ on_upload_complete_(on_upload_complete) {}
+
+NetMetricsLogUploader::~NetMetricsLogUploader() {
+}
+
+void NetMetricsLogUploader::UploadLog(const std::string& compressed_log_data,
+ const std::string& log_hash,
+ const ReportingInfo& reporting_info) {
+ // If this attempt is a retry, the last attempt went over HTTPS and failed
+ // with a network error, and an insecure URL is configured, attempt this
+ // upload over HTTP. Currently this fallback only happens when the
+ // UMAHttpRetry feature is enabled.
+ if (!insecure_server_url_.is_empty() && reporting_info.attempt_count() > 1 &&
+ reporting_info.last_error_code() != 0 &&
+ reporting_info.last_attempt_was_https() &&
+ base::FeatureList::IsEnabled(kHttpRetryFeature)) {
+ UploadLogToURL(compressed_log_data, log_hash, reporting_info,
+ insecure_server_url_);
+ return;
+ }
+ UploadLogToURL(compressed_log_data, log_hash, reporting_info, server_url_);
+}
+
+void NetMetricsLogUploader::UploadLogToURL(
+ const std::string& compressed_log_data,
+ const std::string& log_hash,
+ const ReportingInfo& reporting_info,
+ const GURL& url) {
+ DCHECK(!log_hash.empty());
+
+ // TODO(crbug.com/808498): Restore the data use measurement when bug is fixed.
+
+ auto resource_request = std::make_unique<network::ResourceRequest>();
+ resource_request->url = url;
+ // Drop cookies and auth data.
+ resource_request->allow_credentials = false;
+ resource_request->method = "POST";
+
+ std::string reporting_info_string = SerializeReportingInfo(reporting_info);
+ // If we are not using HTTPS for this upload, encrypt it. We do not encrypt
+ // requests to localhost to allow testing with a local collector that doesn't
+ // have decryption enabled.
+ bool should_encrypt =
+ !url.SchemeIs(url::kHttpsScheme) && !net::IsLocalhost(url);
+ if (should_encrypt) {
+ std::string encrypted_hash;
+ std::string base64_encoded_hash;
+ if (!EncryptString(log_hash, &encrypted_hash)) {
+ on_upload_complete_.Run(0, net::ERR_FAILED, false);
+ return;
+ }
+ base::Base64Encode(encrypted_hash, &base64_encoded_hash);
+ resource_request->headers.SetHeader("X-Chrome-UMA-Log-SHA1",
+ base64_encoded_hash);
+
+ std::string encrypted_reporting_info;
+ std::string base64_reporting_info;
+ if (!EncryptString(reporting_info_string, &encrypted_reporting_info)) {
+ on_upload_complete_.Run(0, net::ERR_FAILED, false);
+ return;
+ }
+ base::Base64Encode(encrypted_reporting_info, &base64_reporting_info);
+ resource_request->headers.SetHeader("X-Chrome-UMA-ReportingInfo",
+ base64_reporting_info);
+ } else {
+ resource_request->headers.SetHeader("X-Chrome-UMA-Log-SHA1", log_hash);
+ resource_request->headers.SetHeader("X-Chrome-UMA-ReportingInfo",
+ reporting_info_string);
+ // Tell the server that we're uploading gzipped protobufs only if we are not
+ // encrypting, since encrypted messages have to be decompressed server side
+ // after decryption, not before.
+ resource_request->headers.SetHeader("content-encoding", "gzip");
+ }
+
+ url_loader_ = network::SimpleURLLoader::Create(
+ std::move(resource_request), GetNetworkTrafficAnnotation(service_type_));
+
+ if (should_encrypt) {
+ std::string encrypted_message;
+ if (!EncryptString(compressed_log_data, &encrypted_message)) {
+ url_loader_.reset();
+ on_upload_complete_.Run(0, net::ERR_FAILED, false);
+ return;
+ }
+ url_loader_->AttachStringForUpload(encrypted_message, mime_type_);
+ RecordUploadSizeForServiceTypeHistograms(encrypted_message.size(),
+ service_type_);
+ } else {
+ url_loader_->AttachStringForUpload(compressed_log_data, mime_type_);
+ RecordUploadSizeForServiceTypeHistograms(compressed_log_data.size(),
+ service_type_);
+ }
+
+ // It's safe to use |base::Unretained(this)| here, because |this| owns
+ // the |url_loader_|, and the callback will be cancelled if the |url_loader_|
+ // is destroyed.
+ url_loader_->DownloadToStringOfUnboundedSizeUntilCrashAndDie(
+ url_loader_factory_.get(),
+ base::BindOnce(&NetMetricsLogUploader::OnURLLoadComplete,
+ base::Unretained(this)));
+}
+
+// The callback is only invoked if the |url_loader_| it was bound against is
+// still alive.
+void NetMetricsLogUploader::OnURLLoadComplete(
+ std::unique_ptr<std::string> response_body) {
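+ // Report -1 if no valid HTTP response headers were received, e.g. on a
+ // network error.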
+ int response_code = -1;
+ if (url_loader_->ResponseInfo() && url_loader_->ResponseInfo()->headers)
+ response_code = url_loader_->ResponseInfo()->headers->response_code();
+
+ int error_code = url_loader_->NetError();
+
+ bool was_https = url_loader_->GetFinalURL().SchemeIs(url::kHttpsScheme);
+ url_loader_.reset();
+ on_upload_complete_.Run(response_code, error_code, was_https);
+}
+
+bool NetMetricsLogUploader::EncryptString(const std::string& plaintext,
+ std::string* encrypted) {
+ encrypted_messages::EncryptedMessage encrypted_message;
+ if (!encrypted_messages::EncryptSerializedMessage(
+ kServerPublicKey, kServerPublicKeyVersion, kEncryptedMessageLabel,
+ plaintext, &encrypted_message) ||
+ !encrypted_message.SerializeToString(encrypted)) {
+ return false;
+ }
+ return true;
+}
+} // namespace metrics
diff --git a/components/metrics/net/net_metrics_log_uploader.h b/components/metrics/net/net_metrics_log_uploader.h
new file mode 100644
index 0000000..99fce26
--- /dev/null
+++ b/components/metrics/net/net_metrics_log_uploader.h
@@ -0,0 +1,86 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_NET_NET_METRICS_LOG_UPLOADER_H_
+#define COMPONENTS_METRICS_NET_NET_METRICS_LOG_UPLOADER_H_
+
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "components/metrics/metrics_log_uploader.h"
+#include "third_party/metrics_proto/reporting_info.pb.h"
+#include "url/gurl.h"
+
+namespace network {
+class SharedURLLoaderFactory;
+class SimpleURLLoader;
+} // namespace network
+
+namespace metrics {
+
+// Implementation of MetricsLogUploader using the Chrome network stack.
+class NetMetricsLogUploader : public MetricsLogUploader {
+ public:
+ // Constructs a NetMetricsLogUploader which uploads data to |server_url| with
+ // the specified |mime_type|. The |service_type| marks which service the
+ // data usage should be attributed to. The |on_upload_complete| callback will
+ // be called with the HTTP response code of the upload or with -1 on an error.
+ NetMetricsLogUploader(
+ scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory,
+ base::StringPiece server_url,
+ base::StringPiece mime_type,
+ MetricsLogUploader::MetricServiceType service_type,
+ const MetricsLogUploader::UploadCallback& on_upload_complete);
+
+ // This constructor allows a secondary non-HTTPS URL to be passed in as
+ // |insecure_server_url|. That URL is used as a fallback if a connection
+ // to |server_url| fails; requests sent to the insecure URL are encrypted.
+ NetMetricsLogUploader(
+ scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory,
+ base::StringPiece server_url,
+ base::StringPiece insecure_server_url,
+ base::StringPiece mime_type,
+ MetricsLogUploader::MetricServiceType service_type,
+ const MetricsLogUploader::UploadCallback& on_upload_complete);
+
+ ~NetMetricsLogUploader() override;
+
+ // MetricsLogUploader:
+ // Uploads a log to the server_url specified in the constructor.
+ void UploadLog(const std::string& compressed_log_data,
+ const std::string& log_hash,
+ const ReportingInfo& reporting_info) override;
+
+ private:
+ // Uploads a log to a URL passed as a parameter.
+ void UploadLogToURL(const std::string& compressed_log_data,
+ const std::string& log_hash,
+ const ReportingInfo& reporting_info,
+ const GURL& url);
+
+ void OnURLLoadComplete(std::unique_ptr<std::string> response_body);
+
+ // Encrypts a |plaintext| string using the encrypted_messages component and
+ // writes the result, a serialized EncryptedMessage, to |encrypted|.
+ bool EncryptString(const std::string& plaintext, std::string* encrypted);
+
+ // The URLLoader factory for loads done using the network stack.
+ scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory_;
+
+ const GURL server_url_;
+ const GURL insecure_server_url_;
+ const std::string mime_type_;
+ const MetricsLogUploader::MetricServiceType service_type_;
+ const MetricsLogUploader::UploadCallback on_upload_complete_;
+ // The loader used for the outstanding upload, if any.
+ std::unique_ptr<network::SimpleURLLoader> url_loader_;
+
+ DISALLOW_COPY_AND_ASSIGN(NetMetricsLogUploader);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_NET_NET_METRICS_LOG_UPLOADER_H_
diff --git a/components/metrics/net/net_metrics_log_uploader_unittest.cc b/components/metrics/net/net_metrics_log_uploader_unittest.cc
new file mode 100644
index 0000000..f6aecdc
--- /dev/null
+++ b/components/metrics/net/net_metrics_log_uploader_unittest.cc
@@ -0,0 +1,187 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/net/net_metrics_log_uploader.h"
+
+#include "base/base64.h"
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/run_loop.h"
+#include "base/test/bind_test_util.h"
+#include "base/test/scoped_task_environment.h"
+#include "components/encrypted_messages/encrypted_message.pb.h"
+#include "net/url_request/test_url_fetcher_factory.h"
+#include "services/network/public/cpp/weak_wrapper_shared_url_loader_factory.h"
+#include "services/network/test/test_url_loader_factory.h"
+#include "services/network/test/test_utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/reporting_info.pb.h"
+#include "third_party/zlib/google/compression_utils.h"
+#include "url/gurl.h"
+
+namespace metrics {
+
+class NetMetricsLogUploaderTest : public testing::Test {
+ public:
+ NetMetricsLogUploaderTest()
+ : on_upload_complete_count_(0),
+ test_shared_url_loader_factory_(
+ base::MakeRefCounted<network::WeakWrapperSharedURLLoaderFactory>(
+ &test_url_loader_factory_)) {
+ test_url_loader_factory_.SetInterceptor(base::BindLambdaForTesting(
+ [&](const network::ResourceRequest& request) {
+ upload_data_ = network::GetUploadData(request);
+ headers_ = request.headers;
+ loop_.Quit();
+ }));
+ }
+
+ void CreateAndOnUploadCompleteReuseUploader() {
+ ReportingInfo reporting_info;
+ reporting_info.set_attempt_count(10);
+ uploader_.reset(new NetMetricsLogUploader(
+ test_shared_url_loader_factory_, "https://dummy_server", "dummy_mime",
+ MetricsLogUploader::UMA,
+ base::Bind(&NetMetricsLogUploaderTest::OnUploadCompleteReuseUploader,
+ base::Unretained(this))));
+ uploader_->UploadLog("initial_dummy_data", "initial_dummy_hash",
+ reporting_info);
+ }
+
+ void CreateUploaderAndUploadToSecureURL(const std::string& url) {
+ ReportingInfo dummy_reporting_info;
+ uploader_.reset(new NetMetricsLogUploader(
+ test_shared_url_loader_factory_, url, "dummy_mime",
+ MetricsLogUploader::UMA,
+ base::Bind(&NetMetricsLogUploaderTest::DummyOnUploadComplete,
+ base::Unretained(this))));
+ uploader_->UploadLog("dummy_data", "dummy_hash", dummy_reporting_info);
+ }
+
+ void CreateUploaderAndUploadToInsecureURL() {
+ ReportingInfo dummy_reporting_info;
+ uploader_.reset(new NetMetricsLogUploader(
+ test_shared_url_loader_factory_, "http://dummy_insecure_server",
+ "dummy_mime", MetricsLogUploader::UMA,
+ base::Bind(&NetMetricsLogUploaderTest::DummyOnUploadComplete,
+ base::Unretained(this))));
+ std::string compressed_message;
+ // Compress the data since the encryption code expects a compressed log,
+ // and tries to decompress it before encrypting it.
+ compression::GzipCompress("dummy_data", &compressed_message);
+ uploader_->UploadLog(compressed_message, "dummy_hash",
+ dummy_reporting_info);
+ }
+
+ void DummyOnUploadComplete(int response_code,
+ int error_code,
+ bool was_https) {}
+
+ void OnUploadCompleteReuseUploader(int response_code,
+ int error_code,
+ bool was_https) {
+ ++on_upload_complete_count_;
+ if (on_upload_complete_count_ == 1) {
+ ReportingInfo reporting_info;
+ reporting_info.set_attempt_count(20);
+ uploader_->UploadLog("dummy_data", "dummy_hash", reporting_info);
+ }
+ }
+
+ network::TestURLLoaderFactory::PendingRequest* GetPendingRequest(
+ size_t index) {
+ if (index >= test_url_loader_factory_.pending_requests()->size())
+ return nullptr;
+ auto* request = &(*test_url_loader_factory_.pending_requests())[index];
+ DCHECK(request);
+ return request;
+ }
+
+ int on_upload_complete_count() const {
+ return on_upload_complete_count_;
+ }
+
+ network::TestURLLoaderFactory* test_url_loader_factory() {
+ return &test_url_loader_factory_;
+ }
+
+ const net::HttpRequestHeaders& last_request_headers() { return headers_; }
+
+ const std::string& last_upload_data() { return upload_data_; }
+
+ void WaitForRequest() { loop_.Run(); }
+
+ private:
+ std::unique_ptr<NetMetricsLogUploader> uploader_;
+ int on_upload_complete_count_;
+
+ network::TestURLLoaderFactory test_url_loader_factory_;
+ scoped_refptr<network::SharedURLLoaderFactory>
+ test_shared_url_loader_factory_;
+
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+
+ base::RunLoop loop_;
+ std::string upload_data_;
+ net::HttpRequestHeaders headers_;
+
+ DISALLOW_COPY_AND_ASSIGN(NetMetricsLogUploaderTest);
+};
+
+void CheckReportingInfoHeader(net::HttpRequestHeaders headers,
+ int expected_attempt_count) {
+ std::string reporting_info_base64;
+ EXPECT_TRUE(
+ headers.GetHeader("X-Chrome-UMA-ReportingInfo", &reporting_info_base64));
+ std::string reporting_info_string;
+ EXPECT_TRUE(
+ base::Base64Decode(reporting_info_base64, &reporting_info_string));
+ ReportingInfo reporting_info;
+ EXPECT_TRUE(reporting_info.ParseFromString(reporting_info_string));
+ EXPECT_EQ(reporting_info.attempt_count(), expected_attempt_count);
+}
+
+TEST_F(NetMetricsLogUploaderTest, OnUploadCompleteReuseUploader) {
+ CreateAndOnUploadCompleteReuseUploader();
+ WaitForRequest();
+
+ // Mimic the initial fetcher callback.
+ CheckReportingInfoHeader(last_request_headers(), 10);
+ test_url_loader_factory()->SimulateResponseWithoutRemovingFromPendingList(
+ GetPendingRequest(0), "");
+
+ // Mimic the second fetcher callback.
+ CheckReportingInfoHeader(last_request_headers(), 20);
+ test_url_loader_factory()->SimulateResponseWithoutRemovingFromPendingList(
+ GetPendingRequest(1), "");
+
+ EXPECT_EQ(on_upload_complete_count(), 2);
+}
+
+// Test that attempting to upload to an HTTP URL results in an encrypted
+// message.
+TEST_F(NetMetricsLogUploaderTest, MessageOverHTTPIsEncrypted) {
+ CreateUploaderAndUploadToInsecureURL();
+ WaitForRequest();
+ encrypted_messages::EncryptedMessage message;
+ EXPECT_TRUE(message.ParseFromString(last_upload_data()));
+}
+
+// Test that attempting to upload to an HTTPS URL results in an unencrypted
+// message.
+TEST_F(NetMetricsLogUploaderTest, MessageOverHTTPSIsNotEncrypted) {
+ CreateUploaderAndUploadToSecureURL("https://dummy_secure_server");
+ WaitForRequest();
+ EXPECT_EQ(last_upload_data(), "dummy_data");
+}
+
+// Test that attempting to upload to localhost over http results in an
+// unencrypted message.
+TEST_F(NetMetricsLogUploaderTest, MessageOverHTTPLocalhostIsNotEncrypted) {
+ CreateUploaderAndUploadToSecureURL("http://localhost");
+ WaitForRequest();
+ EXPECT_EQ(last_upload_data(), "dummy_data");
+}
+
+} // namespace metrics
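As CheckReportingInfoHeader() above shows, the X-Chrome-UMA-ReportingInfo request header carries a base64-encoded, serialized ReportingInfo proto. The following small sketch decodes such a header value outside the test harness; the function name is illustrative.

#include <string>

#include "base/base64.h"
#include "third_party/metrics_proto/reporting_info.pb.h"

// Returns the attempt count carried in an X-Chrome-UMA-ReportingInfo header
// value, or -1 if the value cannot be decoded. Illustrative helper only.
int AttemptCountFromReportingInfoHeader(const std::string& header_value) {
  std::string serialized;
  if (!base::Base64Decode(header_value, &serialized))
    return -1;
  metrics::ReportingInfo reporting_info;
  if (!reporting_info.ParseFromString(serialized))
    return -1;
  return reporting_info.attempt_count();
}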
diff --git a/components/metrics/net/network_metrics_provider.cc b/components/metrics/net/network_metrics_provider.cc
new file mode 100644
index 0000000..d175648
--- /dev/null
+++ b/components/metrics/net/network_metrics_provider.cc
@@ -0,0 +1,483 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/net/network_metrics_provider.h"
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/bind_helpers.h"
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/task/post_task.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "net/base/net_errors.h"
+#include "net/nqe/effective_connection_type_observer.h"
+#include "net/nqe/network_quality_estimator.h"
+
+#if defined(OS_CHROMEOS)
+#include "components/metrics/net/wifi_access_point_info_provider_chromeos.h"
+#endif // OS_CHROMEOS
+
+namespace metrics {
+
+SystemProfileProto::Network::EffectiveConnectionType
+ConvertEffectiveConnectionType(
+ net::EffectiveConnectionType effective_connection_type) {
+ switch (effective_connection_type) {
+ case net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN:
+ return SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_UNKNOWN;
+ case net::EFFECTIVE_CONNECTION_TYPE_SLOW_2G:
+ return SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_SLOW_2G;
+ case net::EFFECTIVE_CONNECTION_TYPE_2G:
+ return SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G;
+ case net::EFFECTIVE_CONNECTION_TYPE_3G:
+ return SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_3G;
+ case net::EFFECTIVE_CONNECTION_TYPE_4G:
+ return SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_4G;
+ case net::EFFECTIVE_CONNECTION_TYPE_OFFLINE:
+ return SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_OFFLINE;
+ case net::EFFECTIVE_CONNECTION_TYPE_LAST:
+ NOTREACHED();
+ return SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_UNKNOWN;
+ }
+ NOTREACHED();
+ return SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_UNKNOWN;
+}
+
+// Listens to the changes in the effective connection type.
+class NetworkMetricsProvider::EffectiveConnectionTypeObserver
+ : public net::EffectiveConnectionTypeObserver {
+ public:
+  // |network_quality_estimator|, provided later via Init(), is used to obtain
+  // the network quality estimates and may be null. |callback| is run on
+  // |callback_task_runner| and provides notifications about changes in the
+  // effective connection type.
+ EffectiveConnectionTypeObserver(
+ base::Callback<void(net::EffectiveConnectionType)> callback,
+ const scoped_refptr<base::SequencedTaskRunner>& callback_task_runner)
+ : network_quality_estimator_(nullptr),
+ callback_(callback),
+ callback_task_runner_(callback_task_runner) {
+ DCHECK(callback_);
+ DCHECK(callback_task_runner_);
+ // |this| is initialized and used on the IO thread using
+ // |network_quality_task_runner_|.
+ thread_checker_.DetachFromThread();
+ }
+
+ ~EffectiveConnectionTypeObserver() override {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (network_quality_estimator_)
+ network_quality_estimator_->RemoveEffectiveConnectionTypeObserver(this);
+ }
+
+  // Initializes |this| on the IO thread using |network_quality_task_runner_|.
+  // This is the same thread on which |network_quality_estimator| lives.
+ void Init(net::NetworkQualityEstimator* network_quality_estimator) {
+ network_quality_estimator_ = network_quality_estimator;
+ if (network_quality_estimator_)
+ network_quality_estimator_->AddEffectiveConnectionTypeObserver(this);
+ }
+
+ private:
+ // net::EffectiveConnectionTypeObserver:
+ void OnEffectiveConnectionTypeChanged(
+ net::EffectiveConnectionType type) override {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ callback_task_runner_->PostTask(FROM_HERE, base::BindOnce(callback_, type));
+ }
+
+ // Notifies |this| when there is a change in the effective connection type.
+ net::NetworkQualityEstimator* network_quality_estimator_;
+
+ // Called when the effective connection type is changed.
+ base::Callback<void(net::EffectiveConnectionType)> callback_;
+
+ // Task runner on which |callback_| is run.
+ scoped_refptr<base::SequencedTaskRunner> callback_task_runner_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(EffectiveConnectionTypeObserver);
+};
+
+NetworkMetricsProvider::NetworkMetricsProvider(
+ std::unique_ptr<NetworkQualityEstimatorProvider>
+ network_quality_estimator_provider)
+ : connection_type_is_ambiguous_(false),
+ network_change_notifier_initialized_(false),
+ wifi_phy_layer_protocol_is_ambiguous_(false),
+ wifi_phy_layer_protocol_(net::WIFI_PHY_LAYER_PROTOCOL_UNKNOWN),
+ total_aborts_(0),
+ total_codes_(0),
+ network_quality_estimator_provider_(
+ std::move(network_quality_estimator_provider)),
+ effective_connection_type_(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN),
+ min_effective_connection_type_(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN),
+ max_effective_connection_type_(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN),
+ weak_ptr_factory_(this) {
+ net::NetworkChangeNotifier::AddNetworkChangeObserver(this);
+ connection_type_ = net::NetworkChangeNotifier::GetConnectionType();
+ if (connection_type_ != net::NetworkChangeNotifier::CONNECTION_UNKNOWN)
+ network_change_notifier_initialized_ = true;
+
+ ProbeWifiPHYLayerProtocol();
+
+ if (network_quality_estimator_provider_) {
+ effective_connection_type_observer_.reset(
+ new EffectiveConnectionTypeObserver(
+ base::Bind(
+ &NetworkMetricsProvider::OnEffectiveConnectionTypeChanged,
+ base::Unretained(this)),
+ base::ThreadTaskRunnerHandle::Get()));
+
+ // Get the network quality estimator and initialize
+ // |effective_connection_type_observer_| on the same task runner on which
+ // the network quality estimator lives. It is safe to use base::Unretained
+ // here since both |network_quality_estimator_provider_| and
+ // |effective_connection_type_observer_| are owned by |this|, and
+ // |network_quality_estimator_provider_| is deleted before
+ // |effective_connection_type_observer_|.
+ network_quality_estimator_provider_->PostReplyNetworkQualityEstimator(
+ base::Bind(
+ &EffectiveConnectionTypeObserver::Init,
+ base::Unretained(effective_connection_type_observer_.get())));
+ }
+}
+
+NetworkMetricsProvider::~NetworkMetricsProvider() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ net::NetworkChangeNotifier::RemoveNetworkChangeObserver(this);
+
+ if (network_quality_estimator_provider_) {
+ scoped_refptr<base::SequencedTaskRunner> network_quality_task_runner =
+ network_quality_estimator_provider_->GetTaskRunner();
+
+    // |network_quality_estimator_provider_| must be deleted before
+    // |effective_connection_type_observer_| since
+    // |network_quality_estimator_provider_| may call back into
+    // |effective_connection_type_observer_|.
+ network_quality_estimator_provider_.reset();
+
+ if (network_quality_task_runner &&
+ !network_quality_task_runner->DeleteSoon(
+ FROM_HERE, effective_connection_type_observer_.release())) {
+ NOTREACHED() << " ECT observer was not deleted successfully";
+ }
+ }
+}
+
+void NetworkMetricsProvider::ProvideCurrentSessionData(
+ ChromeUserMetricsExtension*) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // ProvideCurrentSessionData is called on the main thread, at the time a
+ // metrics record is being finalized.
+ net::NetworkChangeNotifier::FinalizingMetricsLogRecord();
+ LogAggregatedMetrics();
+}
+
+void NetworkMetricsProvider::ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!connection_type_is_ambiguous_ ||
+ network_change_notifier_initialized_);
+ SystemProfileProto::Network* network = system_profile->mutable_network();
+ network->set_connection_type_is_ambiguous(connection_type_is_ambiguous_);
+ network->set_connection_type(GetConnectionType());
+ network->set_wifi_phy_layer_protocol_is_ambiguous(
+ wifi_phy_layer_protocol_is_ambiguous_);
+ network->set_wifi_phy_layer_protocol(GetWifiPHYLayerProtocol());
+
+ network->set_min_effective_connection_type(
+ ConvertEffectiveConnectionType(min_effective_connection_type_));
+ network->set_max_effective_connection_type(
+ ConvertEffectiveConnectionType(max_effective_connection_type_));
+
+ // Update the connection type. Note that this is necessary to set the network
+ // type to "none" if there is no network connection for an entire UMA logging
+  // window, since OnNetworkChanged() ignores transitions to the "none"
+ // state.
+ connection_type_ = net::NetworkChangeNotifier::GetConnectionType();
+ if (connection_type_ != net::NetworkChangeNotifier::CONNECTION_UNKNOWN)
+ network_change_notifier_initialized_ = true;
+ // Reset the "ambiguous" flags, since a new metrics log session has started.
+ connection_type_is_ambiguous_ = false;
+ wifi_phy_layer_protocol_is_ambiguous_ = false;
+ min_effective_connection_type_ = effective_connection_type_;
+ max_effective_connection_type_ = effective_connection_type_;
+
+ if (!wifi_access_point_info_provider_) {
+#if defined(OS_CHROMEOS)
+ wifi_access_point_info_provider_.reset(
+ new WifiAccessPointInfoProviderChromeos());
+#else
+ wifi_access_point_info_provider_.reset(
+ new WifiAccessPointInfoProvider());
+#endif // OS_CHROMEOS
+ }
+
+ // Connected wifi access point information.
+ WifiAccessPointInfoProvider::WifiAccessPointInfo info;
+ if (wifi_access_point_info_provider_->GetInfo(&info))
+ WriteWifiAccessPointProto(info, network);
+}
+
+void NetworkMetricsProvider::OnNetworkChanged(
+ net::NetworkChangeNotifier::ConnectionType type) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // To avoid reporting an ambiguous connection type for users on flaky
+ // connections, ignore transitions to the "none" state. Note that the
+ // connection type is refreshed in ProvideSystemProfileMetrics() each time a
+ // new UMA logging window begins, so users who genuinely transition to offline
+ // mode for an extended duration will still be at least partially represented
+ // in the metrics logs.
+ if (type == net::NetworkChangeNotifier::CONNECTION_NONE) {
+ network_change_notifier_initialized_ = true;
+ return;
+ }
+
+ DCHECK(network_change_notifier_initialized_ ||
+ connection_type_ == net::NetworkChangeNotifier::CONNECTION_UNKNOWN);
+
+ if (type != connection_type_ &&
+ connection_type_ != net::NetworkChangeNotifier::CONNECTION_NONE &&
+ network_change_notifier_initialized_) {
+ // If |network_change_notifier_initialized_| is false, it implies that this
+ // is the first connection change callback received from network change
+ // notifier, and the previous connection type was CONNECTION_UNKNOWN. In
+ // that case, connection type should not be marked as ambiguous since there
+ // was no actual change in the connection type.
+ connection_type_is_ambiguous_ = true;
+ }
+
+ network_change_notifier_initialized_ = true;
+ connection_type_ = type;
+
+ ProbeWifiPHYLayerProtocol();
+}
+
+SystemProfileProto::Network::ConnectionType
+NetworkMetricsProvider::GetConnectionType() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ switch (connection_type_) {
+ case net::NetworkChangeNotifier::CONNECTION_NONE:
+ return SystemProfileProto::Network::CONNECTION_NONE;
+ case net::NetworkChangeNotifier::CONNECTION_UNKNOWN:
+ return SystemProfileProto::Network::CONNECTION_UNKNOWN;
+ case net::NetworkChangeNotifier::CONNECTION_ETHERNET:
+ return SystemProfileProto::Network::CONNECTION_ETHERNET;
+ case net::NetworkChangeNotifier::CONNECTION_WIFI:
+ return SystemProfileProto::Network::CONNECTION_WIFI;
+ case net::NetworkChangeNotifier::CONNECTION_2G:
+ return SystemProfileProto::Network::CONNECTION_2G;
+ case net::NetworkChangeNotifier::CONNECTION_3G:
+ return SystemProfileProto::Network::CONNECTION_3G;
+ case net::NetworkChangeNotifier::CONNECTION_4G:
+ return SystemProfileProto::Network::CONNECTION_4G;
+ case net::NetworkChangeNotifier::CONNECTION_BLUETOOTH:
+ return SystemProfileProto::Network::CONNECTION_BLUETOOTH;
+ }
+ NOTREACHED();
+ return SystemProfileProto::Network::CONNECTION_UNKNOWN;
+}
+
+SystemProfileProto::Network::WifiPHYLayerProtocol
+NetworkMetricsProvider::GetWifiPHYLayerProtocol() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ switch (wifi_phy_layer_protocol_) {
+ case net::WIFI_PHY_LAYER_PROTOCOL_NONE:
+ return SystemProfileProto::Network::WIFI_PHY_LAYER_PROTOCOL_NONE;
+ case net::WIFI_PHY_LAYER_PROTOCOL_ANCIENT:
+ return SystemProfileProto::Network::WIFI_PHY_LAYER_PROTOCOL_ANCIENT;
+ case net::WIFI_PHY_LAYER_PROTOCOL_A:
+ return SystemProfileProto::Network::WIFI_PHY_LAYER_PROTOCOL_A;
+ case net::WIFI_PHY_LAYER_PROTOCOL_B:
+ return SystemProfileProto::Network::WIFI_PHY_LAYER_PROTOCOL_B;
+ case net::WIFI_PHY_LAYER_PROTOCOL_G:
+ return SystemProfileProto::Network::WIFI_PHY_LAYER_PROTOCOL_G;
+ case net::WIFI_PHY_LAYER_PROTOCOL_N:
+ return SystemProfileProto::Network::WIFI_PHY_LAYER_PROTOCOL_N;
+ case net::WIFI_PHY_LAYER_PROTOCOL_UNKNOWN:
+ return SystemProfileProto::Network::WIFI_PHY_LAYER_PROTOCOL_UNKNOWN;
+ }
+ NOTREACHED();
+ return SystemProfileProto::Network::WIFI_PHY_LAYER_PROTOCOL_UNKNOWN;
+}
+
+void NetworkMetricsProvider::ProbeWifiPHYLayerProtocol() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::PostTaskWithTraitsAndReplyWithResult(
+ FROM_HERE,
+ {base::MayBlock(), base::TaskPriority::BEST_EFFORT,
+ base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ base::BindOnce(&net::GetWifiPHYLayerProtocol),
+ base::BindOnce(&NetworkMetricsProvider::OnWifiPHYLayerProtocolResult,
+ weak_ptr_factory_.GetWeakPtr()));
+}
+
+void NetworkMetricsProvider::OnWifiPHYLayerProtocolResult(
+ net::WifiPHYLayerProtocol mode) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (wifi_phy_layer_protocol_ != net::WIFI_PHY_LAYER_PROTOCOL_UNKNOWN &&
+ mode != wifi_phy_layer_protocol_) {
+ wifi_phy_layer_protocol_is_ambiguous_ = true;
+ }
+ wifi_phy_layer_protocol_ = mode;
+}
+
+void NetworkMetricsProvider::WriteWifiAccessPointProto(
+ const WifiAccessPointInfoProvider::WifiAccessPointInfo& info,
+ SystemProfileProto::Network* network_proto) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ SystemProfileProto::Network::WifiAccessPoint* access_point_info =
+ network_proto->mutable_access_point_info();
+ SystemProfileProto::Network::WifiAccessPoint::SecurityMode security =
+ SystemProfileProto::Network::WifiAccessPoint::SECURITY_UNKNOWN;
+ switch (info.security) {
+ case WifiAccessPointInfoProvider::WIFI_SECURITY_NONE:
+ security = SystemProfileProto::Network::WifiAccessPoint::SECURITY_NONE;
+ break;
+ case WifiAccessPointInfoProvider::WIFI_SECURITY_WPA:
+ security = SystemProfileProto::Network::WifiAccessPoint::SECURITY_WPA;
+ break;
+ case WifiAccessPointInfoProvider::WIFI_SECURITY_WEP:
+ security = SystemProfileProto::Network::WifiAccessPoint::SECURITY_WEP;
+ break;
+ case WifiAccessPointInfoProvider::WIFI_SECURITY_RSN:
+ security = SystemProfileProto::Network::WifiAccessPoint::SECURITY_RSN;
+ break;
+ case WifiAccessPointInfoProvider::WIFI_SECURITY_802_1X:
+ security = SystemProfileProto::Network::WifiAccessPoint::SECURITY_802_1X;
+ break;
+ case WifiAccessPointInfoProvider::WIFI_SECURITY_PSK:
+ security = SystemProfileProto::Network::WifiAccessPoint::SECURITY_PSK;
+ break;
+ case WifiAccessPointInfoProvider::WIFI_SECURITY_UNKNOWN:
+ security = SystemProfileProto::Network::WifiAccessPoint::SECURITY_UNKNOWN;
+ break;
+ }
+ access_point_info->set_security_mode(security);
+
+  // |bssid| has the form xx:xx:xx:xx:xx:xx; extract the first three octets
+  // and pack them into a uint32_t.
+ std::string bssid = info.bssid;
+ if (bssid.size() == 17 && bssid[2] == ':' && bssid[5] == ':' &&
+ bssid[8] == ':' && bssid[11] == ':' && bssid[14] == ':') {
+ std::string vendor_prefix_str;
+ uint32_t vendor_prefix;
+
+ base::RemoveChars(bssid.substr(0, 9), ":", &vendor_prefix_str);
+ DCHECK_EQ(6U, vendor_prefix_str.size());
+ if (base::HexStringToUInt(vendor_prefix_str, &vendor_prefix))
+ access_point_info->set_vendor_prefix(vendor_prefix);
+ else
+ NOTREACHED();
+ }
+
+ // Return if vendor information is not provided.
+ if (info.model_number.empty() && info.model_name.empty() &&
+ info.device_name.empty() && info.oui_list.empty())
+ return;
+
+ SystemProfileProto::Network::WifiAccessPoint::VendorInformation* vendor =
+ access_point_info->mutable_vendor_info();
+ if (!info.model_number.empty())
+ vendor->set_model_number(info.model_number);
+ if (!info.model_name.empty())
+ vendor->set_model_name(info.model_name);
+ if (!info.device_name.empty())
+ vendor->set_device_name(info.device_name);
+
+ // Return if OUI list is not provided.
+ if (info.oui_list.empty())
+ return;
+
+ // Parse OUI list.
+ for (const base::StringPiece& oui_str : base::SplitStringPiece(
+ info.oui_list, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL)) {
+ uint32_t oui;
+ if (base::HexStringToUInt(oui_str, &oui)) {
+ vendor->add_element_identifier(oui);
+ } else {
+ DLOG(WARNING) << "Error when parsing OUI list of the WiFi access point";
+ }
+ }
+}
+
+void NetworkMetricsProvider::LogAggregatedMetrics() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::HistogramBase* error_codes = base::SparseHistogram::FactoryGet(
+ "Net.ErrorCodesForMainFrame4",
+ base::HistogramBase::kUmaTargetedHistogramFlag);
+ std::unique_ptr<base::HistogramSamples> samples =
+ error_codes->SnapshotSamples();
+ base::HistogramBase::Count new_aborts =
+ samples->GetCount(-net::ERR_ABORTED) - total_aborts_;
+ base::HistogramBase::Count new_codes = samples->TotalCount() - total_codes_;
+ if (new_codes > 0) {
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Net.ErrAborted.CountPerUpload2", new_aborts, 1,
+ 100000000, 50);
+ UMA_HISTOGRAM_PERCENTAGE("Net.ErrAborted.ProportionPerUpload",
+ (100 * new_aborts) / new_codes);
+ total_codes_ += new_codes;
+ total_aborts_ += new_aborts;
+ }
+}
+
+void NetworkMetricsProvider::OnEffectiveConnectionTypeChanged(
+ net::EffectiveConnectionType type) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ effective_connection_type_ = type;
+
+ if (effective_connection_type_ == net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN ||
+ effective_connection_type_ == net::EFFECTIVE_CONNECTION_TYPE_OFFLINE) {
+ // The effective connection type may be reported as Unknown if there is a
+ // change in the connection type. Disregard it since network requests can't
+    // be sent while the connection type is changing. Similarly, disregard
+ // offline as the type since it may be reported as the effective connection
+ // type for a short period when there is a change in the connection type.
+ return;
+ }
+
+ if (min_effective_connection_type_ ==
+ net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN &&
+ max_effective_connection_type_ ==
+ net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN) {
+ min_effective_connection_type_ = type;
+ max_effective_connection_type_ = type;
+ return;
+ }
+
+ if (min_effective_connection_type_ ==
+ net::EFFECTIVE_CONNECTION_TYPE_OFFLINE &&
+ max_effective_connection_type_ ==
+ net::EFFECTIVE_CONNECTION_TYPE_OFFLINE) {
+ min_effective_connection_type_ = type;
+ max_effective_connection_type_ = type;
+ return;
+ }
+
+ min_effective_connection_type_ =
+ std::min(min_effective_connection_type_, effective_connection_type_);
+ max_effective_connection_type_ =
+ std::max(max_effective_connection_type_, effective_connection_type_);
+
+ DCHECK_EQ(
+ min_effective_connection_type_ == net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ max_effective_connection_type_ == net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN);
+ DCHECK_EQ(
+ min_effective_connection_type_ == net::EFFECTIVE_CONNECTION_TYPE_OFFLINE,
+ max_effective_connection_type_ == net::EFFECTIVE_CONNECTION_TYPE_OFFLINE);
+}
+
+} // namespace metrics
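The BSSID handling in WriteWifiAccessPointProto() above reduces the access point identity to its vendor prefix: the first three octets of an xx:xx:xx:xx:xx:xx address, hex-decoded into a uint32_t. Below is a self-contained sketch of just that step; the helper name is illustrative and not part of the imported sources.

#include <stdint.h>

#include <string>

#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"

// Extracts the vendor prefix (OUI) from a "xx:xx:xx:xx:xx:xx" BSSID, mirroring
// the logic in WriteWifiAccessPointProto(). Returns false if |bssid| is
// malformed.
bool ExtractVendorPrefix(const std::string& bssid, uint32_t* vendor_prefix) {
  if (bssid.size() != 17)
    return false;
  std::string vendor_prefix_str;
  // Drop the ':' separators from the first three octets, e.g. "00:1a:2b".
  base::RemoveChars(bssid.substr(0, 9), ":", &vendor_prefix_str);
  if (vendor_prefix_str.size() != 6)
    return false;
  return base::HexStringToUInt(vendor_prefix_str, vendor_prefix);
}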
diff --git a/components/metrics/net/network_metrics_provider.h b/components/metrics/net/network_metrics_provider.h
new file mode 100644
index 0000000..3b9932f
--- /dev/null
+++ b/components/metrics/net/network_metrics_provider.h
@@ -0,0 +1,159 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_NET_NETWORK_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_NET_NETWORK_METRICS_PROVIDER_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/metrics/histogram_base.h"
+#include "base/sequenced_task_runner.h"
+#include "base/threading/thread_checker.h"
+#include "components/metrics/metrics_provider.h"
+#include "components/metrics/net/wifi_access_point_info_provider.h"
+#include "net/base/network_change_notifier.h"
+#include "net/base/network_interfaces.h"
+#include "net/nqe/effective_connection_type.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace net {
+class NetworkQualityEstimator;
+}
+
+namespace metrics {
+
+SystemProfileProto::Network::EffectiveConnectionType
+ConvertEffectiveConnectionType(
+ net::EffectiveConnectionType effective_connection_type);
+
+// Registers as observer with net::NetworkChangeNotifier and keeps track of
+// the network environment.
+class NetworkMetricsProvider
+ : public MetricsProvider,
+ public net::NetworkChangeNotifier::NetworkChangeObserver {
+ public:
+ // Class that provides |this| with the network quality estimator.
+ class NetworkQualityEstimatorProvider {
+ public:
+ virtual ~NetworkQualityEstimatorProvider() {}
+
+ // Returns the network quality estimator by calling |io_callback|. The
+ // returned network quality estimator may be nullptr. |io_callback| must be
+    // called on the IO thread. |io_callback| can be destroyed on the IO
+    // thread only after |this| is destroyed.
+ virtual void PostReplyNetworkQualityEstimator(
+ base::Callback<void(net::NetworkQualityEstimator*)> io_callback) = 0;
+
+ // Returns the task runner on which |this| should be used and destroyed.
+ virtual scoped_refptr<base::SequencedTaskRunner> GetTaskRunner() = 0;
+
+ protected:
+ NetworkQualityEstimatorProvider() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(NetworkQualityEstimatorProvider);
+ };
+
+ // Creates a NetworkMetricsProvider, where
+ // |network_quality_estimator_provider| should be set if it is useful to
+ // attach the quality of the network to the metrics report.
+ explicit NetworkMetricsProvider(
+ std::unique_ptr<NetworkQualityEstimatorProvider>
+ network_quality_estimator_provider = nullptr);
+ ~NetworkMetricsProvider() override;
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(NetworkMetricsProviderTest, EffectiveConnectionType);
+ FRIEND_TEST_ALL_PREFIXES(NetworkMetricsProviderTest,
+ ECTAmbiguousOnConnectionTypeChange);
+ FRIEND_TEST_ALL_PREFIXES(NetworkMetricsProviderTest,
+ ECTNotAmbiguousOnOffline);
+ FRIEND_TEST_ALL_PREFIXES(NetworkMetricsProviderTest,
+ ConnectionTypeIsAmbiguous);
+
+  // Listens to the changes in the effective connection type.
+ class EffectiveConnectionTypeObserver;
+
+ // MetricsProvider:
+ void ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) override;
+ void ProvideSystemProfileMetrics(SystemProfileProto* system_profile) override;
+
+ // NetworkChangeObserver:
+ void OnNetworkChanged(
+ net::NetworkChangeNotifier::ConnectionType type) override;
+
+ SystemProfileProto::Network::ConnectionType GetConnectionType() const;
+ SystemProfileProto::Network::WifiPHYLayerProtocol GetWifiPHYLayerProtocol()
+ const;
+
+ // Posts a call to net::GetWifiPHYLayerProtocol on the blocking pool.
+ void ProbeWifiPHYLayerProtocol();
+ // Callback from the blocking pool with the result of
+ // net::GetWifiPHYLayerProtocol.
+ void OnWifiPHYLayerProtocolResult(net::WifiPHYLayerProtocol mode);
+
+ // Writes info about the wireless access points that this system is
+ // connected to.
+ void WriteWifiAccessPointProto(
+ const WifiAccessPointInfoProvider::WifiAccessPointInfo& info,
+ SystemProfileProto::Network* network_proto);
+
+ // Logs metrics that are functions of other metrics being uploaded.
+ void LogAggregatedMetrics();
+
+ // Notifies |this| that the effective connection type of the current network
+ // has changed to |type|.
+ void OnEffectiveConnectionTypeChanged(net::EffectiveConnectionType type);
+
+ // True if |connection_type_| changed during the lifetime of the log.
+ bool connection_type_is_ambiguous_;
+ // The connection type according to net::NetworkChangeNotifier.
+ net::NetworkChangeNotifier::ConnectionType connection_type_;
+ // True if the network change notifier has been initialized.
+ bool network_change_notifier_initialized_;
+
+ // True if |wifi_phy_layer_protocol_| changed during the lifetime of the log.
+ bool wifi_phy_layer_protocol_is_ambiguous_;
+ // The PHY mode of the currently associated access point obtained via
+ // net::GetWifiPHYLayerProtocol.
+ net::WifiPHYLayerProtocol wifi_phy_layer_protocol_;
+
+ // Helper object for retrieving connected wifi access point information.
+ std::unique_ptr<WifiAccessPointInfoProvider> wifi_access_point_info_provider_;
+
+ // These metrics track histogram totals for the Net.ErrorCodesForMainFrame4
+ // histogram. They are used to compute deltas at upload time.
+ base::HistogramBase::Count total_aborts_;
+ base::HistogramBase::Count total_codes_;
+
+ // Provides the network quality estimator. May be null.
+ std::unique_ptr<NetworkQualityEstimatorProvider>
+ network_quality_estimator_provider_;
+
+ // Listens to the changes in the effective connection type. Initialized and
+ // destroyed on the IO thread. May be null.
+ std::unique_ptr<EffectiveConnectionTypeObserver>
+ effective_connection_type_observer_;
+
+ // Last known effective connection type.
+ net::EffectiveConnectionType effective_connection_type_;
+
+ // Minimum and maximum effective connection type since the metrics were last
+ // provided.
+ net::EffectiveConnectionType min_effective_connection_type_;
+ net::EffectiveConnectionType max_effective_connection_type_;
+
+ base::ThreadChecker thread_checker_;
+
+ base::WeakPtrFactory<NetworkMetricsProvider> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(NetworkMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_NET_NETWORK_METRICS_PROVIDER_H_
diff --git a/components/metrics/net/network_metrics_provider_unittest.cc b/components/metrics/net/network_metrics_provider_unittest.cc
new file mode 100644
index 0000000..b5e6507
--- /dev/null
+++ b/components/metrics/net/network_metrics_provider_unittest.cc
@@ -0,0 +1,290 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/net/network_metrics_provider.h"
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "net/base/network_change_notifier.h"
+#include "net/nqe/network_quality_estimator_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+#if defined(OS_CHROMEOS)
+#include "chromeos/dbus/dbus_thread_manager.h"
+#include "chromeos/network/network_handler.h"
+#endif // OS_CHROMEOS
+
+namespace metrics {
+
+namespace {
+
+class TestNetworkQualityEstimatorProvider
+ : public NetworkMetricsProvider::NetworkQualityEstimatorProvider {
+ public:
+ explicit TestNetworkQualityEstimatorProvider(
+ net::TestNetworkQualityEstimator* estimator)
+ : estimator_(estimator) {}
+ ~TestNetworkQualityEstimatorProvider() override {}
+
+ private:
+ // NetworkMetricsProvider::NetworkQualityEstimatorProvider:
+ scoped_refptr<base::SequencedTaskRunner> GetTaskRunner() override {
+ return base::ThreadTaskRunnerHandle::Get();
+ }
+
+ void PostReplyNetworkQualityEstimator(
+ base::Callback<void(net::NetworkQualityEstimator*)> callback) override {
+ callback.Run(estimator_);
+ }
+
+ net::TestNetworkQualityEstimator* estimator_;
+ DISALLOW_COPY_AND_ASSIGN(TestNetworkQualityEstimatorProvider);
+};
+
+} // namespace
+
+class NetworkMetricsProviderTest : public testing::Test {
+ protected:
+ NetworkMetricsProviderTest()
+ : scoped_task_environment_(
+ base::test::ScopedTaskEnvironment::MainThreadType::IO) {
+#if defined(OS_CHROMEOS)
+ chromeos::DBusThreadManager::Initialize();
+ chromeos::NetworkHandler::Initialize();
+#endif // OS_CHROMEOS
+ }
+
+ private:
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+};
+
+// Verifies that the effective connection type is correctly set.
+TEST_F(NetworkMetricsProviderTest, EffectiveConnectionType) {
+ net::TestNetworkQualityEstimator estimator;
+ std::unique_ptr<NetworkMetricsProvider::NetworkQualityEstimatorProvider>
+ estimator_provider(base::WrapUnique(
+ new TestNetworkQualityEstimatorProvider(&estimator)));
+ SystemProfileProto system_profile;
+ NetworkMetricsProvider network_metrics_provider(
+ std::move(estimator_provider));
+
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.min_effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.max_effective_connection_type_);
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ system_profile.network().min_effective_connection_type());
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ system_profile.network().max_effective_connection_type());
+
+ // Set RTT so that the effective connection type is computed as 2G.
+ estimator.set_recent_http_rtt(base::TimeDelta::FromMilliseconds(1500));
+ estimator.SetStartTimeNullHttpRtt(base::TimeDelta::FromMilliseconds(1500));
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.min_effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.max_effective_connection_type_);
+ // Running a request would cause the effective connection type to be computed
+ // as 2G, and observers to be notified.
+ estimator.RunOneRequest();
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_2G,
+ network_metrics_provider.effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_2G,
+ network_metrics_provider.min_effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_2G,
+ network_metrics_provider.max_effective_connection_type_);
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().min_effective_connection_type());
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().max_effective_connection_type());
+
+ // Set RTT so that the effective connection type is computed as SLOW_2G.
+ estimator.set_recent_http_rtt(base::TimeDelta::FromMilliseconds(3000));
+ estimator.SetStartTimeNullHttpRtt(base::TimeDelta::FromMilliseconds(3000));
+ // Running a request would cause the effective connection type to be computed
+ // as SLOW_2G, and observers to be notified.
+ estimator.RunOneRequest();
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_SLOW_2G,
+ network_metrics_provider.effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_SLOW_2G,
+ network_metrics_provider.min_effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_2G,
+ network_metrics_provider.max_effective_connection_type_);
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ // Effective connection type changed from 2G to SLOW_2G during the lifetime of
+ // the log. Minimum value of ECT must be different from the maximum value.
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_SLOW_2G,
+ system_profile.network().min_effective_connection_type());
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().max_effective_connection_type());
+
+ // Getting the system profile again should return the current effective
+ // connection type.
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_SLOW_2G,
+ system_profile.network().min_effective_connection_type());
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_SLOW_2G,
+ system_profile.network().max_effective_connection_type());
+}
+
+// Verifies that the effective connection type is not set to UNKNOWN when there
+// is a change in the connection type.
+TEST_F(NetworkMetricsProviderTest, ECTAmbiguousOnConnectionTypeChange) {
+ net::TestNetworkQualityEstimator estimator;
+ std::unique_ptr<NetworkMetricsProvider::NetworkQualityEstimatorProvider>
+ estimator_provider(base::WrapUnique(
+ new TestNetworkQualityEstimatorProvider(&estimator)));
+ SystemProfileProto system_profile;
+ NetworkMetricsProvider network_metrics_provider(
+ std::move(estimator_provider));
+
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.min_effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ network_metrics_provider.max_effective_connection_type_);
+
+ // Set RTT so that the effective connection type is computed as 2G.
+ estimator.set_recent_http_rtt(base::TimeDelta::FromMilliseconds(1500));
+ estimator.SetStartTimeNullHttpRtt(base::TimeDelta::FromMilliseconds(1500));
+ // Running a request would cause the effective connection type to be computed
+ // as 2G, and observers to be notified.
+ estimator.RunOneRequest();
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_2G,
+ network_metrics_provider.effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_2G,
+ network_metrics_provider.min_effective_connection_type_);
+ EXPECT_EQ(net::EFFECTIVE_CONNECTION_TYPE_2G,
+ network_metrics_provider.max_effective_connection_type_);
+
+ // There is no change in the connection type. Effective connection types
+ // should be reported as 2G.
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().min_effective_connection_type());
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().max_effective_connection_type());
+
+ // Even with change in the connection type, effective connection types
+ // should be reported as 2G.
+ network_metrics_provider.OnNetworkChanged(
+ net::NetworkChangeNotifier::CONNECTION_2G);
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().min_effective_connection_type());
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().max_effective_connection_type());
+}
+
+// Verifies that the effective connection type is not set to UNKNOWN when the
+// connection type is OFFLINE.
+TEST_F(NetworkMetricsProviderTest, ECTNotAmbiguousOnOffline) {
+ for (net::EffectiveConnectionType force_ect :
+ {net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN,
+ net::EFFECTIVE_CONNECTION_TYPE_OFFLINE}) {
+ std::unique_ptr<net::NetworkQualityEstimatorParams> params =
+ std::make_unique<net::NetworkQualityEstimatorParams>(
+ std::map<std::string, std::string>());
+ net::NetworkQualityEstimatorParams* params_ptr = params.get();
+ net::TestNetworkQualityEstimator estimator(std::move(params));
+
+ std::unique_ptr<NetworkMetricsProvider::NetworkQualityEstimatorProvider>
+ estimator_provider(base::WrapUnique(
+ new TestNetworkQualityEstimatorProvider(&estimator)));
+ SystemProfileProto system_profile;
+ NetworkMetricsProvider network_metrics_provider(
+ std::move(estimator_provider));
+
+ params_ptr->SetForcedEffectiveConnectionType(
+ net::EFFECTIVE_CONNECTION_TYPE_2G);
+ estimator.RunOneRequest();
+
+ params_ptr->SetForcedEffectiveConnectionType(force_ect);
+ estimator.RunOneRequest();
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().min_effective_connection_type());
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_2G,
+ system_profile.network().max_effective_connection_type());
+
+ params_ptr->SetForcedEffectiveConnectionType(
+ net::EFFECTIVE_CONNECTION_TYPE_4G);
+ estimator.RunOneRequest();
+
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_4G,
+ system_profile.network().min_effective_connection_type());
+ EXPECT_EQ(SystemProfileProto::Network::EFFECTIVE_CONNECTION_TYPE_4G,
+ system_profile.network().max_effective_connection_type());
+ }
+}
+
+// Verifies that the |connection_type_is_ambiguous_| boolean is correctly set.
+TEST_F(NetworkMetricsProviderTest, ConnectionTypeIsAmbiguous) {
+ net::TestNetworkQualityEstimator estimator;
+ std::unique_ptr<NetworkMetricsProvider::NetworkQualityEstimatorProvider>
+ estimator_provider(base::WrapUnique(
+ new TestNetworkQualityEstimatorProvider(&estimator)));
+ SystemProfileProto system_profile;
+ NetworkMetricsProvider network_metrics_provider(
+ std::move(estimator_provider));
+ estimator.RunOneRequest();
+
+ EXPECT_EQ(net::NetworkChangeNotifier::CONNECTION_UNKNOWN,
+ network_metrics_provider.connection_type_);
+ EXPECT_FALSE(network_metrics_provider.connection_type_is_ambiguous_);
+ EXPECT_FALSE(network_metrics_provider.network_change_notifier_initialized_);
+
+ // When a connection type change callback is received, network change notifier
+ // should be marked as initialized.
+ network_metrics_provider.OnNetworkChanged(
+ net::NetworkChangeNotifier::CONNECTION_2G);
+ EXPECT_EQ(net::NetworkChangeNotifier::CONNECTION_2G,
+ network_metrics_provider.connection_type_);
+ // Connection type should not be marked as ambiguous when a delayed connection
+ // type change callback is received due to delayed initialization of the
+ // network change notifier.
+ EXPECT_FALSE(network_metrics_provider.connection_type_is_ambiguous_);
+ EXPECT_TRUE(network_metrics_provider.network_change_notifier_initialized_);
+
+ // On collection of the system profile, |connection_type_is_ambiguous_| should
+ // stay false, and |network_change_notifier_initialized_| should remain true.
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_FALSE(network_metrics_provider.connection_type_is_ambiguous_);
+ EXPECT_TRUE(network_metrics_provider.network_change_notifier_initialized_);
+ EXPECT_FALSE(system_profile.network().connection_type_is_ambiguous());
+ EXPECT_EQ(SystemProfileProto::Network::CONNECTION_2G,
+ system_profile.network().connection_type());
+
+ network_metrics_provider.OnNetworkChanged(
+ net::NetworkChangeNotifier::CONNECTION_3G);
+ EXPECT_TRUE(network_metrics_provider.connection_type_is_ambiguous_);
+ EXPECT_TRUE(network_metrics_provider.network_change_notifier_initialized_);
+
+ // On collection of the system profile, |connection_type_is_ambiguous_| should
+ // be reset to false, and |network_change_notifier_initialized_| should remain
+ // true.
+ network_metrics_provider.ProvideSystemProfileMetrics(&system_profile);
+ EXPECT_FALSE(network_metrics_provider.connection_type_is_ambiguous_);
+ EXPECT_TRUE(network_metrics_provider.network_change_notifier_initialized_);
+ EXPECT_TRUE(system_profile.network().connection_type_is_ambiguous());
+ EXPECT_EQ(SystemProfileProto::Network::CONNECTION_3G,
+ system_profile.network().connection_type());
+}
+
+} // namespace metrics
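The min/max bookkeeping exercised by the EffectiveConnectionType and ECTNotAmbiguousOnOffline tests above boils down to: drop UNKNOWN and OFFLINE samples, let the first usable sample seed both bounds, then only widen the range. The sketch below is a simplification of OnEffectiveConnectionTypeChanged(), not a drop-in replacement; the struct and method names are illustrative.

#include <algorithm>

#include "net/nqe/effective_connection_type.h"

// Tracks the minimum and maximum effective connection type seen since the
// last reset. Simplified sketch of the provider's bookkeeping.
struct EffectiveConnectionTypeRange {
  net::EffectiveConnectionType min_ect = net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN;
  net::EffectiveConnectionType max_ect = net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN;

  void Observe(net::EffectiveConnectionType type) {
    // UNKNOWN and OFFLINE are transient around connection changes; ignore them
    // so they do not poison the reported range.
    if (type == net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN ||
        type == net::EFFECTIVE_CONNECTION_TYPE_OFFLINE) {
      return;
    }
    if (min_ect == net::EFFECTIVE_CONNECTION_TYPE_UNKNOWN) {
      // The first usable sample seeds both bounds.
      min_ect = max_ect = type;
      return;
    }
    min_ect = std::min(min_ect, type);
    max_ect = std::max(max_ect, type);
  }
};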
diff --git a/components/metrics/net/wifi_access_point_info_provider.cc b/components/metrics/net/wifi_access_point_info_provider.cc
new file mode 100644
index 0000000..21e04b1
--- /dev/null
+++ b/components/metrics/net/wifi_access_point_info_provider.cc
@@ -0,0 +1,25 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/net/wifi_access_point_info_provider.h"
+
+namespace metrics {
+
+WifiAccessPointInfoProvider::WifiAccessPointInfo::WifiAccessPointInfo() {
+}
+
+WifiAccessPointInfoProvider::WifiAccessPointInfo::~WifiAccessPointInfo() {
+}
+
+WifiAccessPointInfoProvider::WifiAccessPointInfoProvider() {
+}
+
+WifiAccessPointInfoProvider::~WifiAccessPointInfoProvider() {
+}
+
+bool WifiAccessPointInfoProvider::GetInfo(WifiAccessPointInfo *info) {
+ return false;
+}
+
+} // namespace metrics
diff --git a/components/metrics/net/wifi_access_point_info_provider.h b/components/metrics/net/wifi_access_point_info_provider.h
new file mode 100644
index 0000000..3a761da
--- /dev/null
+++ b/components/metrics/net/wifi_access_point_info_provider.h
@@ -0,0 +1,54 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_NET_WIFI_ACCESS_POINT_INFO_PROVIDER_H_
+#define COMPONENTS_METRICS_NET_WIFI_ACCESS_POINT_INFO_PROVIDER_H_
+
+#include <string>
+
+#include "base/macros.h"
+
+namespace metrics {
+
+// Interface for accessing connected wireless access point information.
+class WifiAccessPointInfoProvider {
+ public:
+ // Wifi access point security mode definitions.
+ enum WifiSecurityMode {
+ WIFI_SECURITY_UNKNOWN = 0,
+ WIFI_SECURITY_WPA = 1,
+ WIFI_SECURITY_WEP = 2,
+ WIFI_SECURITY_RSN = 3,
+ WIFI_SECURITY_802_1X = 4,
+ WIFI_SECURITY_PSK = 5,
+ WIFI_SECURITY_NONE
+ };
+
+ // Information of the currently connected wifi access point.
+ struct WifiAccessPointInfo {
+ WifiAccessPointInfo();
+ ~WifiAccessPointInfo();
+ WifiSecurityMode security;
+ std::string bssid;
+ std::string model_number;
+ std::string model_name;
+ std::string device_name;
+ std::string oui_list;
+ };
+
+ WifiAccessPointInfoProvider();
+ virtual ~WifiAccessPointInfoProvider();
+
+  // Fill in the wifi access point info if the device is currently connected
+  // to a wifi access point. Return true if the device is connected to a wifi
+  // access point, false otherwise.
+ virtual bool GetInfo(WifiAccessPointInfo *info);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(WifiAccessPointInfoProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_NET_WIFI_ACCESS_POINT_INFO_PROVIDER_H_
diff --git a/components/metrics/net/wifi_access_point_info_provider_chromeos.cc b/components/metrics/net/wifi_access_point_info_provider_chromeos.cc
new file mode 100644
index 0000000..3654e93
--- /dev/null
+++ b/components/metrics/net/wifi_access_point_info_provider_chromeos.cc
@@ -0,0 +1,123 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/net/wifi_access_point_info_provider_chromeos.h"
+
+#include <stdint.h>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/strings/string_number_conversions.h"
+#include "chromeos/network/network_configuration_handler.h"
+#include "chromeos/network/network_handler.h"
+#include "chromeos/network/network_state.h"
+#include "chromeos/network/network_state_handler.h"
+#include "chromeos/network/shill_property_util.h"
+#include "third_party/cros_system_api/dbus/service_constants.h"
+
+using chromeos::NetworkHandler;
+
+namespace metrics {
+
+WifiAccessPointInfoProviderChromeos::WifiAccessPointInfoProviderChromeos() {
+ NetworkHandler::Get()->network_state_handler()->AddObserver(this, FROM_HERE);
+
+ // Update initial connection state.
+ DefaultNetworkChanged(
+ NetworkHandler::Get()->network_state_handler()->DefaultNetwork());
+}
+
+WifiAccessPointInfoProviderChromeos::~WifiAccessPointInfoProviderChromeos() {
+ NetworkHandler::Get()->network_state_handler()->RemoveObserver(this,
+ FROM_HERE);
+}
+
+bool WifiAccessPointInfoProviderChromeos::GetInfo(WifiAccessPointInfo* info) {
+ // Wifi access point information is not provided if the BSSID is empty.
+ // This assumes the BSSID is never empty when access point information exists.
+ if (wifi_access_point_info_.bssid.empty())
+ return false;
+
+ *info = wifi_access_point_info_;
+ return true;
+}
+
+void WifiAccessPointInfoProviderChromeos::DefaultNetworkChanged(
+ const chromeos::NetworkState* default_network) {
+  // Reset access point info to prevent reporting of outdated data.
+ wifi_access_point_info_ = WifiAccessPointInfo();
+
+ // Skip non-wifi connections
+ if (!default_network || default_network->type() != shill::kTypeWifi)
+ return;
+
+ // Retrieve access point info for wifi connection.
+ NetworkHandler::Get()->network_configuration_handler()->GetShillProperties(
+ default_network->path(),
+ base::Bind(&WifiAccessPointInfoProviderChromeos::ParseInfo, AsWeakPtr()),
+ chromeos::network_handler::ErrorCallback());
+}
+
+void WifiAccessPointInfoProviderChromeos::ParseInfo(
+ const std::string &service_path,
+ const base::DictionaryValue& properties) {
+ // Skip services that contain "_nomap" in the SSID.
+ std::string ssid = chromeos::shill_property_util::GetSSIDFromProperties(
+ properties, false /* verbose_logging */, nullptr);
+ if (ssid.find("_nomap", 0) != std::string::npos)
+ return;
+
+ std::string bssid;
+ if (!properties.GetStringWithoutPathExpansion(shill::kWifiBSsid, &bssid) ||
+ bssid.empty())
+ return;
+
+  // Filter out BSSIDs with the local bit set in the first octet.
+ uint32_t first_octet;
+ if (!base::HexStringToUInt(bssid.substr(0, 2), &first_octet))
+ NOTREACHED();
+ if (first_octet & 0x2)
+ return;
+ wifi_access_point_info_.bssid = bssid;
+
+ // Parse security info.
+ std::string security;
+ properties.GetStringWithoutPathExpansion(
+ shill::kSecurityProperty, &security);
+ wifi_access_point_info_.security = WIFI_SECURITY_UNKNOWN;
+ if (security == shill::kSecurityWpa)
+ wifi_access_point_info_.security = WIFI_SECURITY_WPA;
+ else if (security == shill::kSecurityWep)
+ wifi_access_point_info_.security = WIFI_SECURITY_WEP;
+ else if (security == shill::kSecurityRsn)
+ wifi_access_point_info_.security = WIFI_SECURITY_RSN;
+ else if (security == shill::kSecurity8021x)
+ wifi_access_point_info_.security = WIFI_SECURITY_802_1X;
+ else if (security == shill::kSecurityPsk)
+ wifi_access_point_info_.security = WIFI_SECURITY_PSK;
+ else if (security == shill::kSecurityNone)
+ wifi_access_point_info_.security = WIFI_SECURITY_NONE;
+
+ properties.GetStringWithoutPathExpansion(
+ shill::kWifiBSsid, &wifi_access_point_info_.bssid);
+ const base::DictionaryValue* vendor_dict = NULL;
+ if (!properties.GetDictionaryWithoutPathExpansion(
+ shill::kWifiVendorInformationProperty,
+ &vendor_dict))
+ return;
+
+ vendor_dict->GetStringWithoutPathExpansion(
+ shill::kVendorWPSModelNumberProperty,
+ &wifi_access_point_info_.model_number);
+ vendor_dict->GetStringWithoutPathExpansion(
+ shill::kVendorWPSModelNameProperty,
+ &wifi_access_point_info_.model_name);
+ vendor_dict->GetStringWithoutPathExpansion(
+ shill::kVendorWPSDeviceNameProperty,
+ &wifi_access_point_info_.device_name);
+ vendor_dict->GetStringWithoutPathExpansion(shill::kVendorOUIListProperty,
+ &wifi_access_point_info_.oui_list);
+}
+
+} // namespace metrics
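ParseInfo() above drops BSSIDs whose first octet has the 0x2 bit set, i.e. locally administered MAC addresses, so that randomized or software-defined BSSIDs are never attached to metrics. The following standalone sketch isolates just that filter; the function name is illustrative and not part of the imported sources.

#include <stdint.h>

#include <string>

#include "base/strings/string_number_conversions.h"

// Returns true if |bssid| ("xx:xx:xx:xx:xx:xx") is a locally administered MAC
// address, i.e. the 0x2 bit of its first octet is set. Such BSSIDs are skipped
// by ParseInfo().
bool IsLocallyAdministeredBssid(const std::string& bssid) {
  uint32_t first_octet = 0;
  if (bssid.size() < 2 ||
      !base::HexStringToUInt(bssid.substr(0, 2), &first_octet)) {
    return false;  // Malformed input; the caller decides how to handle it.
  }
  return (first_octet & 0x2) != 0;
}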
diff --git a/components/metrics/net/wifi_access_point_info_provider_chromeos.h b/components/metrics/net/wifi_access_point_info_provider_chromeos.h
new file mode 100644
index 0000000..d310239
--- /dev/null
+++ b/components/metrics/net/wifi_access_point_info_provider_chromeos.h
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_NET_WIFI_ACCESS_POINT_INFO_PROVIDER_CHROMEOS_H_
+#define COMPONENTS_METRICS_NET_WIFI_ACCESS_POINT_INFO_PROVIDER_CHROMEOS_H_
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/values.h"
+#include "chromeos/network/network_state_handler_observer.h"
+#include "components/metrics/net/wifi_access_point_info_provider.h"
+
+namespace metrics {
+
+// WifiAccessPointInfoProviderChromeos provides the connected wifi
+// access point information for chromeos.
+class WifiAccessPointInfoProviderChromeos
+ : public WifiAccessPointInfoProvider,
+ public chromeos::NetworkStateHandlerObserver,
+ public base::SupportsWeakPtr<WifiAccessPointInfoProviderChromeos> {
+ public:
+ WifiAccessPointInfoProviderChromeos();
+ ~WifiAccessPointInfoProviderChromeos() override;
+
+ // WifiAccessPointInfoProvider:
+ bool GetInfo(WifiAccessPointInfo* info) override;
+
+ // NetworkStateHandlerObserver:
+ void DefaultNetworkChanged(
+ const chromeos::NetworkState* default_network) override;
+
+ private:
+ // Callback from Shill.Service.GetProperties. Parses |properties| to obtain
+ // the wifi access point information.
+ void ParseInfo(const std::string& service_path,
+ const base::DictionaryValue& properties);
+
+ WifiAccessPointInfo wifi_access_point_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(WifiAccessPointInfoProviderChromeos);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_NET_WIFI_ACCESS_POINT_INFO_PROVIDER_CHROMEOS_H_
diff --git a/components/metrics/persisted_logs.cc b/components/metrics/persisted_logs.cc
new file mode 100644
index 0000000..74d1ca7
--- /dev/null
+++ b/components/metrics/persisted_logs.cc
@@ -0,0 +1,227 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/persisted_logs.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/base64.h"
+#include "base/md5.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/sha1.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/timer/elapsed_timer.h"
+#include "components/metrics/persisted_logs_metrics.h"
+#include "components/prefs/pref_service.h"
+#include "components/prefs/scoped_user_pref_update.h"
+#include "third_party/zlib/google/compression_utils.h"
+
+namespace metrics {
+
+namespace {
+
+const char kLogHashKey[] = "hash";
+const char kLogTimestampKey[] = "timestamp";
+const char kLogDataKey[] = "data";
+
+std::string EncodeToBase64(const std::string& to_convert) {
+ // CHECK to diagnose crbug.com/695433
+ CHECK(to_convert.data());
+ std::string base64_result;
+ base::Base64Encode(to_convert, &base64_result);
+ return base64_result;
+}
+
+std::string DecodeFromBase64(const std::string& to_convert) {
+ std::string result;
+ base::Base64Decode(to_convert, &result);
+ return result;
+}
+
+} // namespace
+
+void PersistedLogs::LogInfo::Init(PersistedLogsMetrics* metrics,
+ const std::string& log_data,
+ const std::string& log_timestamp) {
+ DCHECK(!log_data.empty());
+
+ if (!compression::GzipCompress(log_data, &compressed_log_data)) {
+ NOTREACHED();
+ return;
+ }
+
+ metrics->RecordCompressionRatio(compressed_log_data.size(), log_data.size());
+
+ hash = base::SHA1HashString(log_data);
+ timestamp = log_timestamp;
+}
+
+PersistedLogs::PersistedLogs(std::unique_ptr<PersistedLogsMetrics> metrics,
+ PrefService* local_state,
+ const char* pref_name,
+ size_t min_log_count,
+ size_t min_log_bytes,
+ size_t max_log_size)
+ : metrics_(std::move(metrics)),
+ local_state_(local_state),
+ pref_name_(pref_name),
+ min_log_count_(min_log_count),
+ min_log_bytes_(min_log_bytes),
+ max_log_size_(max_log_size != 0 ? max_log_size : static_cast<size_t>(-1)),
+ staged_log_index_(-1) {
+ DCHECK(local_state_);
+ // One of the limit arguments must be non-zero.
+ DCHECK(min_log_count_ > 0 || min_log_bytes_ > 0);
+}
+
+PersistedLogs::~PersistedLogs() {}
+
+bool PersistedLogs::has_unsent_logs() const {
+ return !!size();
+}
+
+// True if a log has been staged.
+bool PersistedLogs::has_staged_log() const {
+ return staged_log_index_ != -1;
+}
+
+// Returns the compressed data of the staged log.
+const std::string& PersistedLogs::staged_log() const {
+ DCHECK(has_staged_log());
+ return list_[staged_log_index_].compressed_log_data;
+}
+
+// Returns the hash of the staged log.
+const std::string& PersistedLogs::staged_log_hash() const {
+ DCHECK(has_staged_log());
+ return list_[staged_log_index_].hash;
+}
+
+// Returns the timestamp of the staged log.
+const std::string& PersistedLogs::staged_log_timestamp() const {
+ DCHECK(has_staged_log());
+ return list_[staged_log_index_].timestamp;
+}
+
+void PersistedLogs::StageNextLog() {
+ // CHECK, rather than DCHECK, because swap()ing with an empty list causes
+ // hard-to-identify crashes much later.
+ CHECK(!list_.empty());
+ DCHECK(!has_staged_log());
+ staged_log_index_ = list_.size() - 1;
+ DCHECK(has_staged_log());
+}
+
+void PersistedLogs::DiscardStagedLog() {
+ // CHECK, rather than DCHECK, to diagnose the cause of crashes from the
+ // field, for crbug.com/695433.
+ CHECK(has_staged_log());
+ DCHECK_LT(static_cast<size_t>(staged_log_index_), list_.size());
+ list_.erase(list_.begin() + staged_log_index_);
+ staged_log_index_ = -1;
+}
+
+void PersistedLogs::PersistUnsentLogs() const {
+ ListPrefUpdate update(local_state_, pref_name_);
+ WriteLogsToPrefList(update.Get());
+}
+
+void PersistedLogs::LoadPersistedUnsentLogs() {
+ ReadLogsFromPrefList(*local_state_->GetList(pref_name_));
+}
+
+void PersistedLogs::StoreLog(const std::string& log_data) {
+ list_.push_back(LogInfo());
+ list_.back().Init(metrics_.get(), log_data,
+ base::Int64ToString(base::Time::Now().ToTimeT()));
+}
+
+void PersistedLogs::Purge() {
+ if (has_staged_log()) {
+ DiscardStagedLog();
+ }
+ list_.clear();
+ local_state_->ClearPref(pref_name_);
+}
+
+void PersistedLogs::ReadLogsFromPrefList(const base::ListValue& list_value) {
+ if (list_value.empty()) {
+ metrics_->RecordLogReadStatus(PersistedLogsMetrics::LIST_EMPTY);
+ return;
+ }
+
+ const size_t log_count = list_value.GetSize();
+
+ DCHECK(list_.empty());
+ list_.resize(log_count);
+
+ for (size_t i = 0; i < log_count; ++i) {
+ const base::DictionaryValue* dict;
+ if (!list_value.GetDictionary(i, &dict) ||
+ !dict->GetString(kLogDataKey, &list_[i].compressed_log_data) ||
+ !dict->GetString(kLogHashKey, &list_[i].hash)) {
+ list_.clear();
+ metrics_->RecordLogReadStatus(
+ PersistedLogsMetrics::LOG_STRING_CORRUPTION);
+ return;
+ }
+
+ list_[i].compressed_log_data =
+ DecodeFromBase64(list_[i].compressed_log_data);
+ list_[i].hash = DecodeFromBase64(list_[i].hash);
+ // Ignore the result of this step, as the timestamp might not be present
+ // for older logs.
+ // NOTE: Should be added to the check with the other fields once the
+ // migration is over.
+ dict->GetString(kLogTimestampKey, &list_[i].timestamp);
+ }
+
+ metrics_->RecordLogReadStatus(PersistedLogsMetrics::RECALL_SUCCESS);
+}
+
+void PersistedLogs::WriteLogsToPrefList(base::ListValue* list_value) const {
+ list_value->Clear();
+
+ // Keep the most recent logs whose size does not exceed |max_log_size_|.
+ // At least |min_log_bytes_| bytes of logs and |min_log_count_| logs are kept
+ // before older logs are discarded.
+ size_t start = list_.size();
+ size_t saved_log_count = 0;
+ size_t bytes_used = 0;
+ for (; start > 0; --start) {
+ size_t log_size = list_[start - 1].compressed_log_data.length();
+ if (bytes_used >= min_log_bytes_ &&
+ saved_log_count >= min_log_count_) {
+ break;
+ }
+ // Oversized logs won't be persisted, so don't count them.
+ if (log_size > max_log_size_)
+ continue;
+ bytes_used += log_size;
+ ++saved_log_count;
+ }
+ int dropped_logs_num = start;
+
+ for (size_t i = start; i < list_.size(); ++i) {
+ size_t log_size = list_[i].compressed_log_data.length();
+ if (log_size > max_log_size_) {
+ metrics_->RecordDroppedLogSize(log_size);
+ dropped_logs_num++;
+ continue;
+ }
+ std::unique_ptr<base::DictionaryValue> dict_value(
+ new base::DictionaryValue);
+ dict_value->SetString(kLogHashKey, EncodeToBase64(list_[i].hash));
+ dict_value->SetString(kLogDataKey,
+ EncodeToBase64(list_[i].compressed_log_data));
+ dict_value->SetString(kLogTimestampKey, list_[i].timestamp);
+ list_value->Append(std::move(dict_value));
+ }
+ if (dropped_logs_num > 0)
+ metrics_->RecordDroppedLogsNum(dropped_logs_num);
+}
+
+} // namespace metrics
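
For reference, the retention pass in WriteLogsToPrefList() scans list_ from the
newest log to the oldest and keeps accumulating until both thresholds are met.
A worked trace with hypothetical values (min_log_count_ = 3,
min_log_bytes_ = 1000, and five logs whose compressed sizes are 600, 500, 400,
300 and 200 bytes, oldest first):

  newest, 200 B: bytes_used = 200,  saved_log_count = 1  -> keep scanning
  300 B:         bytes_used = 500,  saved_log_count = 2  -> keep scanning
  400 B:         bytes_used = 900,  saved_log_count = 3  -> count met, bytes not
  500 B:         bytes_used = 1400, saved_log_count = 4  -> both met, stop

The scan stops with start == 1, so only the newest four logs are written back
to the pref and the oldest 600-byte log is dropped. Logs whose compressed size
exceeds |max_log_size_| never count toward either threshold; any that fall
inside the written range are skipped by the second loop, which records their
size via RecordDroppedLogSize().
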
diff --git a/components/metrics/persisted_logs.h b/components/metrics/persisted_logs.h
new file mode 100644
index 0000000..f95b820
--- /dev/null
+++ b/components/metrics/persisted_logs.h
@@ -0,0 +1,125 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_PERSISTED_LOGS_H_
+#define COMPONENTS_METRICS_PERSISTED_LOGS_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/values.h"
+#include "components/metrics/log_store.h"
+
+class PrefService;
+
+namespace metrics {
+
+class PersistedLogsMetrics;
+
+// Maintains a list of unsent logs that are written and restored from disk.
+class PersistedLogs : public LogStore {
+ public:
+ // Constructs a PersistedLogs that stores data in |local_state| under the
+ // preference |pref_name|.
+ // Calling code is responsible for ensuring that the lifetime of |local_state|
+ // is longer than the lifetime of PersistedLogs.
+ //
+ // When saving logs to disk, keeps the most recent |min_log_count| logs or
+ // at least |min_log_bytes| bytes of logs, whichever is greater.
+ //
+ // If the optional |max_log_size| parameter is non-zero, all logs larger than
+ // that limit will be skipped when writing to disk.
+ PersistedLogs(std::unique_ptr<PersistedLogsMetrics> metrics,
+ PrefService* local_state,
+ const char* pref_name,
+ size_t min_log_count,
+ size_t min_log_bytes,
+ size_t max_log_size);
+ ~PersistedLogs();
+
+ // LogStore:
+ bool has_unsent_logs() const override;
+ bool has_staged_log() const override;
+ const std::string& staged_log() const override;
+ const std::string& staged_log_hash() const override;
+ void StageNextLog() override;
+ void DiscardStagedLog() override;
+ void PersistUnsentLogs() const override;
+ void LoadPersistedUnsentLogs() override;
+
+ // Adds a log to the list.
+ void StoreLog(const std::string& log_data);
+
+ // Delete all logs, in memory and on disk.
+ void Purge();
+
+ // Returns the timestamp of the staged log.
+ const std::string& staged_log_timestamp() const;
+
+ // The number of elements currently stored.
+ size_t size() const { return list_.size(); }
+
+ private:
+ // Writes the list to the ListValue.
+ void WriteLogsToPrefList(base::ListValue* list) const;
+
+ // Reads the list from the ListValue.
+ void ReadLogsFromPrefList(const base::ListValue& list);
+
+ // An object for recording UMA metrics.
+ std::unique_ptr<PersistedLogsMetrics> metrics_;
+
+ // A weak pointer to the PrefService object to read and write the preference
+ // from. Calling code should ensure this object continues to exist for the
+ // lifetime of the PersistedLogs object.
+ PrefService* local_state_;
+
+ // The name of the preference to serialize logs to/from.
+ const char* pref_name_;
+
+ // We will keep at least |min_log_count_| logs or |min_log_bytes_| bytes of
+ // logs, whichever is greater, when writing to disk. These limits apply after
+ // skipping logs greater than |max_log_size_|.
+ const size_t min_log_count_;
+ const size_t min_log_bytes_;
+
+ // Logs greater than this size will not be written to disk.
+ const size_t max_log_size_;
+
+ struct LogInfo {
+ // Initializes the members based on uncompressed |log_data| and
+ // |log_timestamp|.
+ // |metrics| is the parent's metrics_ object, and should not be held.
+ void Init(PersistedLogsMetrics* metrics,
+ const std::string& log_data,
+ const std::string& log_timestamp);
+
+ // Compressed log data - a serialized protobuf that's been gzipped.
+ std::string compressed_log_data;
+
+ // The SHA1 hash of log, stored to catch errors from memory corruption.
+ std::string hash;
+
+ // The timestamp of when the log was created as a time_t value.
+ std::string timestamp;
+ };
+ // A list of all of the stored logs, each with a SHA1 hash used to check for
+ // corruption while they are held in memory.
+ std::vector<LogInfo> list_;
+
+ // The index of the log staged for upload. If nothing has been staged, the
+ // index will be -1.
+ int staged_log_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(PersistedLogs);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_PERSISTED_LOGS_H_
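
A minimal usage sketch of the class declared above, modeled on the unit tests
added later in this patch. The pref name and the limits are placeholder values,
and PersistedLogsMetricsImpl / TestingPrefServiceSimple stand in for whatever
the embedding code actually wires up.

  #include <memory>

  #include "components/metrics/persisted_logs.h"
  #include "components/metrics/persisted_logs_metrics_impl.h"
  #include "components/prefs/pref_registry_simple.h"
  #include "components/prefs/testing_pref_service.h"

  void PersistedLogsSketch() {
    TestingPrefServiceSimple prefs;
    prefs.registry()->RegisterListPref("UnsentLogs");  // Placeholder pref name.

    // Keep at least 3 logs or 1000 compressed bytes; 0 disables the per-log
    // size cap.
    metrics::PersistedLogs logs(
        std::make_unique<metrics::PersistedLogsMetricsImpl>(), &prefs,
        "UnsentLogs", /*min_log_count=*/3, /*min_log_bytes=*/1000,
        /*max_log_size=*/0);

    logs.LoadPersistedUnsentLogs();     // Restore anything saved earlier.
    logs.StoreLog("serialized log");    // Compressed and hashed internally.

    if (logs.has_unsent_logs()) {
      logs.StageNextLog();              // Stages the most recently stored log.
      // ... upload logs.staged_log() somewhere ...
      logs.DiscardStagedLog();          // Remove it after a successful upload.
    }
    logs.PersistUnsentLogs();           // Write what is left back to prefs.
  }
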
diff --git a/components/metrics/persisted_logs_metrics.h b/components/metrics/persisted_logs_metrics.h
new file mode 100644
index 0000000..e3c58dd
--- /dev/null
+++ b/components/metrics/persisted_logs_metrics.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_PERSISTED_LOGS_METRICS_H_
+#define COMPONENTS_METRICS_PERSISTED_LOGS_METRICS_H_
+
+#include "base/macros.h"
+#include "components/metrics/persisted_logs.h"
+
+namespace metrics {
+
+// Interface for recording metrics from PersistedLogs.
+class PersistedLogsMetrics {
+ public:
+ // Used to produce a histogram that keeps track of the status of recalling
+ // persisted logs.
+ enum LogReadStatus {
+ RECALL_SUCCESS, // We were able to correctly recall a persisted log.
+ LIST_EMPTY, // Attempting to recall from an empty list.
+ LIST_SIZE_MISSING, // Failed to recover list size using GetAsInteger().
+ LIST_SIZE_TOO_SMALL, // Too few elements in the list (less than 3).
+ LIST_SIZE_CORRUPTION, // List size is not as expected.
+ LOG_STRING_CORRUPTION, // Failed to recover log string using GetAsString().
+ CHECKSUM_CORRUPTION, // Failed to verify checksum.
+ CHECKSUM_STRING_CORRUPTION, // Failed to recover checksum string using
+ // GetAsString().
+ DECODE_FAIL, // Failed to decode log.
+ DEPRECATED_XML_PROTO_MISMATCH, // The XML and protobuf logs have
+ // inconsistent data.
+ END_RECALL_STATUS // Number of bins to use to create the histogram.
+ };
+
+ PersistedLogsMetrics() {}
+ virtual ~PersistedLogsMetrics() {}
+
+ virtual void RecordLogReadStatus(LogReadStatus status) {}
+
+ virtual void RecordCompressionRatio(
+ size_t compressed_size, size_t original_size) {}
+
+ virtual void RecordDroppedLogSize(size_t size) {}
+
+ virtual void RecordDroppedLogsNum(int dropped_logs_num) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PersistedLogsMetrics);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_PERSISTED_LOGS_METRICS_H_
diff --git a/components/metrics/persisted_logs_metrics_impl.cc b/components/metrics/persisted_logs_metrics_impl.cc
new file mode 100644
index 0000000..7142ba5
--- /dev/null
+++ b/components/metrics/persisted_logs_metrics_impl.cc
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/persisted_logs_metrics_impl.h"
+
+#include "base/metrics/histogram_macros.h"
+
+namespace metrics {
+
+void PersistedLogsMetricsImpl::RecordLogReadStatus(
+ PersistedLogsMetrics::LogReadStatus status) {
+ UMA_HISTOGRAM_ENUMERATION("PrefService.PersistentLogRecallProtobufs", status,
+ PersistedLogsMetrics::END_RECALL_STATUS);
+}
+
+void PersistedLogsMetricsImpl::RecordCompressionRatio(
+ size_t compressed_size, size_t original_size) {
+ UMA_HISTOGRAM_PERCENTAGE(
+ "UMA.ProtoCompressionRatio",
+ static_cast<int>(100 * compressed_size / original_size));
+}
+
+void PersistedLogsMetricsImpl::RecordDroppedLogSize(size_t size) {
+ UMA_HISTOGRAM_COUNTS("UMA.Large Accumulated Log Not Persisted",
+ static_cast<int>(size));
+}
+
+void PersistedLogsMetricsImpl::RecordDroppedLogsNum(int dropped_logs_num) {
+ UMA_HISTOGRAM_COUNTS("UMA.UnsentLogs.Dropped", dropped_logs_num);
+}
+
+} // namespace metrics
diff --git a/components/metrics/persisted_logs_metrics_impl.h b/components/metrics/persisted_logs_metrics_impl.h
new file mode 100644
index 0000000..544acf9
--- /dev/null
+++ b/components/metrics/persisted_logs_metrics_impl.h
@@ -0,0 +1,32 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_PERSISTED_LOGS_METRICS_IMPL_H_
+#define COMPONENTS_METRICS_PERSISTED_LOGS_METRICS_IMPL_H_
+
+#include "base/macros.h"
+#include "components/metrics/persisted_logs_metrics.h"
+
+namespace metrics {
+
+// Implementation for recording metrics from PersistedLogs.
+class PersistedLogsMetricsImpl : public PersistedLogsMetrics {
+ public:
+ PersistedLogsMetricsImpl() {}
+ ~PersistedLogsMetricsImpl() override {}
+
+ // PersistedLogsMetrics:
+ void RecordLogReadStatus(PersistedLogsMetrics::LogReadStatus status) override;
+ void RecordCompressionRatio(
+ size_t compressed_size, size_t original_size) override;
+ void RecordDroppedLogSize(size_t size) override;
+ void RecordDroppedLogsNum(int dropped_logs_num) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PersistedLogsMetricsImpl);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_PERSISTED_LOGS_METRICS_IMPL_H_
diff --git a/components/metrics/persisted_logs_unittest.cc b/components/metrics/persisted_logs_unittest.cc
new file mode 100644
index 0000000..1a6c931
--- /dev/null
+++ b/components/metrics/persisted_logs_unittest.cc
@@ -0,0 +1,290 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/persisted_logs.h"
+
+#include <stddef.h>
+
+#include "base/base64.h"
+#include "base/macros.h"
+#include "base/rand_util.h"
+#include "base/sha1.h"
+#include "base/values.h"
+#include "components/metrics/persisted_logs_metrics_impl.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/scoped_user_pref_update.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/zlib/google/compression_utils.h"
+
+namespace metrics {
+
+namespace {
+
+const char kTestPrefName[] = "TestPref";
+const size_t kLogCountLimit = 3;
+const size_t kLogByteLimit = 1000;
+
+// Compresses |log_data| and returns the result.
+std::string Compress(const std::string& log_data) {
+ std::string compressed_log_data;
+ EXPECT_TRUE(compression::GzipCompress(log_data, &compressed_log_data));
+ return compressed_log_data;
+}
+
+// Generates and returns log data such that its size after compression is at
+// least |min_compressed_size|.
+std::string GenerateLogWithMinCompressedSize(size_t min_compressed_size) {
+ // Since the size check is done against a compressed log, generate enough
+ // data that compresses to larger than |min_compressed_size|.
+ std::string rand_bytes = base::RandBytesAsString(min_compressed_size);
+ while (Compress(rand_bytes).size() < min_compressed_size)
+ rand_bytes.append(base::RandBytesAsString(min_compressed_size));
+ std::string base64_data_for_logging;
+ base::Base64Encode(rand_bytes, &base64_data_for_logging);
+ SCOPED_TRACE(testing::Message() << "Using random data "
+ << base64_data_for_logging);
+ return rand_bytes;
+}
+
+class PersistedLogsTest : public testing::Test {
+ public:
+ PersistedLogsTest() {
+ prefs_.registry()->RegisterListPref(kTestPrefName);
+ }
+
+ protected:
+ TestingPrefServiceSimple prefs_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PersistedLogsTest);
+};
+
+class TestPersistedLogs : public PersistedLogs {
+ public:
+ TestPersistedLogs(PrefService* service, size_t min_log_bytes)
+ : PersistedLogs(std::unique_ptr<PersistedLogsMetricsImpl>(
+ new PersistedLogsMetricsImpl()),
+ service,
+ kTestPrefName,
+ kLogCountLimit,
+ min_log_bytes,
+ 0) {}
+
+ // Stages and removes the next log, while testing its value.
+ void ExpectNextLog(const std::string& expected_log) {
+ StageNextLog();
+ EXPECT_EQ(staged_log(), Compress(expected_log));
+ DiscardStagedLog();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestPersistedLogs);
+};
+
+} // namespace
+
+// Store and retrieve empty list_value.
+TEST_F(PersistedLogsTest, EmptyLogList) {
+ TestPersistedLogs persisted_logs(&prefs_, kLogByteLimit);
+
+ persisted_logs.PersistUnsentLogs();
+ const base::ListValue* list_value = prefs_.GetList(kTestPrefName);
+ EXPECT_EQ(0U, list_value->GetSize());
+
+ TestPersistedLogs result_persisted_logs(&prefs_, kLogByteLimit);
+ result_persisted_logs.LoadPersistedUnsentLogs();
+ EXPECT_EQ(0U, result_persisted_logs.size());
+}
+
+// Store and retrieve a single log value.
+TEST_F(PersistedLogsTest, SingleElementLogList) {
+ TestPersistedLogs persisted_logs(&prefs_, kLogByteLimit);
+
+ persisted_logs.StoreLog("Hello world!");
+ persisted_logs.PersistUnsentLogs();
+
+ TestPersistedLogs result_persisted_logs(&prefs_, kLogByteLimit);
+ result_persisted_logs.LoadPersistedUnsentLogs();
+ EXPECT_EQ(1U, result_persisted_logs.size());
+
+ // Verify that the result log matches the initial log.
+ persisted_logs.StageNextLog();
+ result_persisted_logs.StageNextLog();
+ EXPECT_EQ(persisted_logs.staged_log(), result_persisted_logs.staged_log());
+ EXPECT_EQ(persisted_logs.staged_log_hash(),
+ result_persisted_logs.staged_log_hash());
+ EXPECT_EQ(persisted_logs.staged_log_timestamp(),
+ result_persisted_logs.staged_log_timestamp());
+}
+
+// Store a set of logs over the length limit, but smaller than the min number of
+// bytes.
+TEST_F(PersistedLogsTest, LongButTinyLogList) {
+ TestPersistedLogs persisted_logs(&prefs_, kLogByteLimit);
+
+ size_t log_count = kLogCountLimit * 5;
+ for (size_t i = 0; i < log_count; ++i)
+ persisted_logs.StoreLog("x");
+
+ persisted_logs.PersistUnsentLogs();
+
+ TestPersistedLogs result_persisted_logs(&prefs_, kLogByteLimit);
+ result_persisted_logs.LoadPersistedUnsentLogs();
+ EXPECT_EQ(persisted_logs.size(), result_persisted_logs.size());
+
+ result_persisted_logs.ExpectNextLog("x");
+}
+
+// Store a set of logs over the length limit, but that doesn't reach the minimum
+// number of bytes until after passing the length limit.
+TEST_F(PersistedLogsTest, LongButSmallLogList) {
+ size_t log_count = kLogCountLimit * 5;
+ size_t log_size = 50;
+
+ std::string first_kept = "First to keep";
+ first_kept.resize(log_size, ' ');
+
+ std::string blank_log = std::string(log_size, ' ');
+
+ std::string last_kept = "Last to keep";
+ last_kept.resize(log_size, ' ');
+
+ // Set the byte limit enough to keep everything but the first two logs.
+ const size_t min_log_bytes =
+ Compress(first_kept).length() + Compress(last_kept).length() +
+ (log_count - 4) * Compress(blank_log).length();
+ TestPersistedLogs persisted_logs(&prefs_, min_log_bytes);
+
+ persisted_logs.StoreLog("one");
+ persisted_logs.StoreLog("two");
+ persisted_logs.StoreLog(first_kept);
+ for (size_t i = persisted_logs.size(); i < log_count - 1; ++i) {
+ persisted_logs.StoreLog(blank_log);
+ }
+ persisted_logs.StoreLog(last_kept);
+ persisted_logs.PersistUnsentLogs();
+
+ TestPersistedLogs result_persisted_logs(&prefs_, kLogByteLimit);
+ result_persisted_logs.LoadPersistedUnsentLogs();
+ EXPECT_EQ(persisted_logs.size() - 2, result_persisted_logs.size());
+
+ result_persisted_logs.ExpectNextLog(last_kept);
+ while (result_persisted_logs.size() > 1) {
+ result_persisted_logs.ExpectNextLog(blank_log);
+ }
+ result_persisted_logs.ExpectNextLog(first_kept);
+}
+
+// Store a set of logs within the length limit, but well over the minimum
+// number of bytes.
+TEST_F(PersistedLogsTest, ShortButLargeLogList) {
+ // Make the total byte count about twice the minimum.
+ size_t log_count = kLogCountLimit;
+ size_t log_size = (kLogByteLimit / log_count) * 2;
+ std::string log_data = GenerateLogWithMinCompressedSize(log_size);
+
+ TestPersistedLogs persisted_logs(&prefs_, kLogByteLimit);
+ for (size_t i = 0; i < log_count; ++i) {
+ persisted_logs.StoreLog(log_data);
+ }
+ persisted_logs.PersistUnsentLogs();
+
+ TestPersistedLogs result_persisted_logs(&prefs_, kLogByteLimit);
+ result_persisted_logs.LoadPersistedUnsentLogs();
+ EXPECT_EQ(persisted_logs.size(), result_persisted_logs.size());
+}
+
+// Store a set of logs over the length limit, and over the minimum number of
+// bytes.
+TEST_F(PersistedLogsTest, LongAndLargeLogList) {
+ TestPersistedLogs persisted_logs(&prefs_, kLogByteLimit);
+
+ // Include twice the max number of logs.
+ size_t log_count = kLogCountLimit * 2;
+ // Make the total byte count about four times the minimum.
+ size_t log_size = (kLogByteLimit / log_count) * 4;
+
+ std::string target_log = "First to keep";
+ target_log += GenerateLogWithMinCompressedSize(log_size);
+
+ std::string log_data = GenerateLogWithMinCompressedSize(log_size);
+ for (size_t i = 0; i < log_count; ++i) {
+ if (i == log_count - kLogCountLimit)
+ persisted_logs.StoreLog(target_log);
+ else
+ persisted_logs.StoreLog(log_data);
+ }
+
+ persisted_logs.PersistUnsentLogs();
+
+ TestPersistedLogs result_persisted_logs(&prefs_, kLogByteLimit);
+ result_persisted_logs.LoadPersistedUnsentLogs();
+ EXPECT_EQ(kLogCountLimit, result_persisted_logs.size());
+
+ while (result_persisted_logs.size() > 1) {
+ result_persisted_logs.ExpectNextLog(log_data);
+ }
+ result_persisted_logs.ExpectNextLog(target_log);
+}
+
+// Check that the store/stage/discard functions work as expected.
+TEST_F(PersistedLogsTest, Staging) {
+ TestPersistedLogs persisted_logs(&prefs_, kLogByteLimit);
+ std::string tmp;
+
+ EXPECT_FALSE(persisted_logs.has_staged_log());
+ persisted_logs.StoreLog("one");
+ EXPECT_FALSE(persisted_logs.has_staged_log());
+ persisted_logs.StoreLog("two");
+ persisted_logs.StageNextLog();
+ EXPECT_TRUE(persisted_logs.has_staged_log());
+ EXPECT_EQ(persisted_logs.staged_log(), Compress("two"));
+ persisted_logs.StoreLog("three");
+ EXPECT_EQ(persisted_logs.staged_log(), Compress("two"));
+ EXPECT_EQ(persisted_logs.size(), 3U);
+ persisted_logs.DiscardStagedLog();
+ EXPECT_FALSE(persisted_logs.has_staged_log());
+ EXPECT_EQ(persisted_logs.size(), 2U);
+ persisted_logs.StageNextLog();
+ EXPECT_EQ(persisted_logs.staged_log(), Compress("three"));
+ persisted_logs.DiscardStagedLog();
+ persisted_logs.StageNextLog();
+ EXPECT_EQ(persisted_logs.staged_log(), Compress("one"));
+ persisted_logs.DiscardStagedLog();
+ EXPECT_FALSE(persisted_logs.has_staged_log());
+ EXPECT_EQ(persisted_logs.size(), 0U);
+}
+
+TEST_F(PersistedLogsTest, DiscardOrder) {
+ // Ensure that the correct log is discarded if new logs are pushed while
+ // a log is staged.
+ TestPersistedLogs persisted_logs(&prefs_, kLogByteLimit);
+
+ persisted_logs.StoreLog("one");
+ persisted_logs.StageNextLog();
+ persisted_logs.StoreLog("two");
+ persisted_logs.DiscardStagedLog();
+ persisted_logs.PersistUnsentLogs();
+
+ TestPersistedLogs result_persisted_logs(&prefs_, kLogByteLimit);
+ result_persisted_logs.LoadPersistedUnsentLogs();
+ EXPECT_EQ(1U, result_persisted_logs.size());
+ result_persisted_logs.ExpectNextLog("two");
+}
+
+TEST_F(PersistedLogsTest, Hashes) {
+ const char kFooText[] = "foo";
+ const std::string foo_hash = base::SHA1HashString(kFooText);
+
+ TestPersistedLogs persisted_logs(&prefs_, kLogByteLimit);
+ persisted_logs.StoreLog(kFooText);
+ persisted_logs.StageNextLog();
+
+ EXPECT_EQ(Compress(kFooText), persisted_logs.staged_log());
+ EXPECT_EQ(foo_hash, persisted_logs.staged_log_hash());
+}
+
+} // namespace metrics
diff --git a/components/metrics/persistent_system_profile.cc b/components/metrics/persistent_system_profile.cc
new file mode 100644
index 0000000..fa35a44
--- /dev/null
+++ b/components/metrics/persistent_system_profile.cc
@@ -0,0 +1,440 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/persistent_system_profile.h"
+
+#include <set>
+
+#include "base/atomicops.h"
+#include "base/bits.h"
+#include "base/memory/singleton.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/pickle.h"
+#include "base/stl_util.h"
+#include "components/variations/active_field_trials.h"
+
+namespace metrics {
+
+namespace {
+
+// To provide atomic addition of records so that there is no confusion between
+// writers and readers, all of the metadata about a record is contained in a
+// structure that can be stored as a single atomic 32-bit word.
+union RecordHeader {
+ struct {
+ unsigned continued : 1; // Flag indicating if there is more after this.
+ unsigned type : 7; // The type of this record.
+ unsigned amount : 24; // The amount of data to follow.
+ } as_parts;
+ base::subtle::Atomic32 as_atomic;
+};
+
+constexpr uint32_t kTypeIdSystemProfile = 0x330A7150; // SHA1(SystemProfile)
+constexpr size_t kSystemProfileAllocSize = 4 << 10; // 4 KiB
+constexpr size_t kMaxRecordSize = (1 << 24) - sizeof(RecordHeader);
+
+static_assert(sizeof(RecordHeader) == sizeof(base::subtle::Atomic32),
+ "bad RecordHeader size");
+
+// Calculate the size of a record based on the amount of data. This adds room
+// for the record header and rounds up to the next multiple of the record-header
+// size.
+size_t CalculateRecordSize(size_t data_amount) {
+ return base::bits::Align(data_amount + sizeof(RecordHeader),
+ sizeof(RecordHeader));
+}
+
+} // namespace
+
+PersistentSystemProfile::RecordAllocator::RecordAllocator(
+ base::PersistentMemoryAllocator* memory_allocator,
+ size_t min_size)
+ : allocator_(memory_allocator),
+ has_complete_profile_(false),
+ alloc_reference_(0),
+ alloc_size_(0),
+ end_offset_(0) {
+ AddSegment(min_size);
+}
+
+PersistentSystemProfile::RecordAllocator::RecordAllocator(
+ const base::PersistentMemoryAllocator* memory_allocator)
+ : allocator_(
+ const_cast<base::PersistentMemoryAllocator*>(memory_allocator)),
+ has_complete_profile_(false),
+ alloc_reference_(0),
+ alloc_size_(0),
+ end_offset_(0) {}
+
+void PersistentSystemProfile::RecordAllocator::Reset() {
+ // Clear the first word of all blocks so they're known to be "empty".
+ alloc_reference_ = 0;
+ while (NextSegment()) {
+ // Get the block as a char* and cast it. It can't be fetched directly as
+ // an array of RecordHeader because that's not a fundamental type and only
+ // arrays of fundamental types are allowed.
+ RecordHeader* header =
+ reinterpret_cast<RecordHeader*>(allocator_->GetAsArray<char>(
+ alloc_reference_, kTypeIdSystemProfile, sizeof(RecordHeader)));
+ DCHECK(header);
+ base::subtle::NoBarrier_Store(&header->as_atomic, 0);
+ }
+
+ // Reset member variables.
+ has_complete_profile_ = false;
+ alloc_reference_ = 0;
+ alloc_size_ = 0;
+ end_offset_ = 0;
+}
+
+bool PersistentSystemProfile::RecordAllocator::Write(RecordType type,
+ base::StringPiece record) {
+ const char* data = record.data();
+ size_t remaining_size = record.size();
+
+ // Allocate space and write records until everything has been stored.
+ do {
+ if (end_offset_ == alloc_size_) {
+ if (!AddSegment(remaining_size))
+ return false;
+ }
+ // Write out as much of the data as possible. |data| and |remaining_size|
+ // are updated in place.
+ if (!WriteData(type, &data, &remaining_size))
+ return false;
+ } while (remaining_size > 0);
+
+ return true;
+}
+
+bool PersistentSystemProfile::RecordAllocator::HasMoreData() const {
+ if (alloc_reference_ == 0 && !NextSegment())
+ return false;
+
+ char* block =
+ allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
+ base::PersistentMemoryAllocator::kSizeAny);
+ if (!block)
+ return false;
+
+ RecordHeader header;
+ header.as_atomic = base::subtle::Acquire_Load(
+ reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
+ return header.as_parts.type != kUnusedSpace;
+}
+
+bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type,
+ std::string* record) const {
+ *type = kUnusedSpace;
+ record->clear();
+
+ // Access data and read records until everything has been loaded.
+ while (true) {
+ if (end_offset_ == alloc_size_) {
+ if (!NextSegment())
+ return false;
+ }
+ if (ReadData(type, record))
+ return *type != kUnusedSpace;
+ }
+}
+
+bool PersistentSystemProfile::RecordAllocator::NextSegment() const {
+ base::PersistentMemoryAllocator::Iterator iter(allocator_, alloc_reference_);
+ alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile);
+ alloc_size_ = allocator_->GetAllocSize(alloc_reference_);
+ end_offset_ = 0;
+ return alloc_reference_ != 0;
+}
+
+bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) {
+ if (NextSegment()) {
+ // The first record-header should have been zeroed as part of the allocation
+ // or by the "reset" procedure.
+ DCHECK_EQ(0, base::subtle::NoBarrier_Load(
+ allocator_->GetAsArray<base::subtle::Atomic32>(
+ alloc_reference_, kTypeIdSystemProfile, 1)));
+ return true;
+ }
+
+ DCHECK_EQ(0U, alloc_reference_);
+ DCHECK_EQ(0U, end_offset_);
+
+ size_t size =
+ std::max(CalculateRecordSize(min_size), kSystemProfileAllocSize);
+
+ uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile);
+ if (!ref)
+ return false; // Allocator must be full.
+ allocator_->MakeIterable(ref);
+
+ alloc_reference_ = ref;
+ alloc_size_ = allocator_->GetAllocSize(ref);
+ return true;
+}
+
+bool PersistentSystemProfile::RecordAllocator::WriteData(RecordType type,
+ const char** data,
+ size_t* data_size) {
+ char* block =
+ allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
+ base::PersistentMemoryAllocator::kSizeAny);
+ if (!block)
+ return false; // It's bad if there is no accessible block.
+
+ const size_t max_write_size = std::min(
+ kMaxRecordSize, alloc_size_ - end_offset_ - sizeof(RecordHeader));
+ const size_t write_size = std::min(*data_size, max_write_size);
+ const size_t record_size = CalculateRecordSize(write_size);
+ DCHECK_LT(write_size, record_size);
+
+ // Write the data and the record header.
+ RecordHeader header;
+ header.as_atomic = 0;
+ header.as_parts.type = type;
+ header.as_parts.amount = write_size;
+ header.as_parts.continued = (write_size < *data_size);
+ size_t offset = end_offset_;
+ end_offset_ += record_size;
+ DCHECK_GE(alloc_size_, end_offset_);
+ if (end_offset_ < alloc_size_) {
+ // Zero the next record header before this one is published to readers.
+ base::subtle::NoBarrier_Store(
+ reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_), 0);
+ }
+ memcpy(block + offset + sizeof(header), *data, write_size);
+ base::subtle::Release_Store(
+ reinterpret_cast<base::subtle::Atomic32*>(block + offset),
+ header.as_atomic);
+
+ // Account for what was stored and prepare for follow-on records with any
+ // remaining data.
+ *data += write_size;
+ *data_size -= write_size;
+
+ return true;
+}
+
+bool PersistentSystemProfile::RecordAllocator::ReadData(
+ RecordType* type,
+ std::string* record) const {
+ DCHECK_GT(alloc_size_, end_offset_);
+
+ char* block =
+ allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
+ base::PersistentMemoryAllocator::kSizeAny);
+ if (!block) {
+ *type = kUnusedSpace;
+ return true; // No more data.
+ }
+
+ // Get and validate the record header.
+ RecordHeader header;
+ header.as_atomic = base::subtle::Acquire_Load(
+ reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
+ bool continued = !!header.as_parts.continued;
+ if (header.as_parts.type == kUnusedSpace) {
+ *type = kUnusedSpace;
+ return true; // End of all records.
+ } else if (*type == kUnusedSpace) {
+ *type = static_cast<RecordType>(header.as_parts.type);
+ } else if (*type != header.as_parts.type) {
+ NOTREACHED(); // Continuation didn't match start of record.
+ *type = kUnusedSpace;
+ record->clear();
+ return false;
+ }
+ size_t read_size = header.as_parts.amount;
+ if (end_offset_ + sizeof(header) + read_size > alloc_size_) {
+ NOTREACHED(); // Invalid header amount.
+ *type = kUnusedSpace;
+ return true; // Don't try again.
+ }
+
+ // Append the record data to the output string.
+ record->append(block + end_offset_ + sizeof(header), read_size);
+ end_offset_ += CalculateRecordSize(read_size);
+ DCHECK_GE(alloc_size_, end_offset_);
+
+ return !continued;
+}
+
+PersistentSystemProfile::PersistentSystemProfile() {}
+
+PersistentSystemProfile::~PersistentSystemProfile() {}
+
+void PersistentSystemProfile::RegisterPersistentAllocator(
+ base::PersistentMemoryAllocator* memory_allocator) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ // Create and store the allocator. A |min_size| of "1" ensures that a memory
+ // block is reserved now.
+ RecordAllocator allocator(memory_allocator, 1);
+ allocators_.push_back(std::move(allocator));
+ all_have_complete_profile_ = false;
+}
+
+void PersistentSystemProfile::DeregisterPersistentAllocator(
+ base::PersistentMemoryAllocator* memory_allocator) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ // This would be more efficient with a std::map but it's not expected that
+ // allocators will get deregistered with any frequency, if at all.
+ base::EraseIf(allocators_, [=](RecordAllocator& records) {
+ return records.allocator() == memory_allocator;
+ });
+}
+
+void PersistentSystemProfile::SetSystemProfile(
+ const std::string& serialized_profile,
+ bool complete) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ if (allocators_.empty() || serialized_profile.empty())
+ return;
+
+ for (auto& allocator : allocators_) {
+ // Don't overwrite a complete profile with an incomplete one.
+ if (!complete && allocator.has_complete_profile())
+ continue;
+ // A full system profile always starts fresh. Incomplete keeps existing
+ // records for merging.
+ if (complete)
+ allocator.Reset();
+ // Write out the serialized profile.
+ allocator.Write(kSystemProfileProto, serialized_profile);
+ // Indicate if this is a complete profile.
+ if (complete)
+ allocator.set_complete_profile();
+ }
+
+ if (complete)
+ all_have_complete_profile_ = true;
+}
+
+void PersistentSystemProfile::SetSystemProfile(
+ const SystemProfileProto& profile,
+ bool complete) {
+ // Avoid serialization if the passed profile is not complete and all
+ // allocators already have complete ones.
+ if (!complete && all_have_complete_profile_)
+ return;
+
+ std::string serialized_profile;
+ if (!profile.SerializeToString(&serialized_profile))
+ return;
+ SetSystemProfile(serialized_profile, complete);
+}
+
+void PersistentSystemProfile::AddFieldTrial(base::StringPiece trial,
+ base::StringPiece group) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK(!trial.empty());
+ DCHECK(!group.empty());
+
+ base::Pickle pickler;
+ pickler.WriteString(trial);
+ pickler.WriteString(group);
+
+ WriteToAll(kFieldTrialInfo,
+ base::StringPiece(static_cast<const char*>(pickler.data()),
+ pickler.size()));
+}
+
+// static
+bool PersistentSystemProfile::HasSystemProfile(
+ const base::PersistentMemoryAllocator& memory_allocator) {
+ const RecordAllocator records(&memory_allocator);
+ return records.HasMoreData();
+}
+
+// static
+bool PersistentSystemProfile::GetSystemProfile(
+ const base::PersistentMemoryAllocator& memory_allocator,
+ SystemProfileProto* system_profile) {
+ const RecordAllocator records(&memory_allocator);
+
+ RecordType type;
+ std::string record;
+ do {
+ if (!records.Read(&type, &record))
+ return false;
+ } while (type != kSystemProfileProto);
+
+ if (!system_profile)
+ return true;
+
+ if (!system_profile->ParseFromString(record))
+ return false;
+
+ MergeUpdateRecords(memory_allocator, system_profile);
+ return true;
+}
+
+// static
+void PersistentSystemProfile::MergeUpdateRecords(
+ const base::PersistentMemoryAllocator& memory_allocator,
+ SystemProfileProto* system_profile) {
+ const RecordAllocator records(&memory_allocator);
+
+ RecordType type;
+ std::string record;
+ std::set<uint32_t> known_field_trial_ids;
+
+ // This is done separately from the code that gets the profile because it
+ // compartmentalizes the code and makes it possible to reuse this section
+ // should it be needed to merge "update" records into a new "complete"
+ // system profile that somehow didn't get all the updates.
+ while (records.Read(&type, &record)) {
+ switch (type) {
+ case kUnusedSpace:
+ // These should never be returned.
+ NOTREACHED();
+ break;
+
+ case kSystemProfileProto:
+ // Profile was passed in; ignore this one.
+ break;
+
+ case kFieldTrialInfo: {
+ // Get the set of known trial IDs so duplicates don't get added.
+ if (known_field_trial_ids.empty()) {
+ for (int i = 0; i < system_profile->field_trial_size(); ++i) {
+ known_field_trial_ids.insert(
+ system_profile->field_trial(i).name_id());
+ }
+ }
+
+ base::Pickle pickler(record.data(), record.size());
+ base::PickleIterator iter(pickler);
+ base::StringPiece trial;
+ base::StringPiece group;
+ if (iter.ReadStringPiece(&trial) && iter.ReadStringPiece(&group)) {
+ variations::ActiveGroupId field_ids =
+ variations::MakeActiveGroupId(trial, group);
+ if (!base::ContainsKey(known_field_trial_ids, field_ids.name)) {
+ SystemProfileProto::FieldTrial* field_trial =
+ system_profile->add_field_trial();
+ field_trial->set_name_id(field_ids.name);
+ field_trial->set_group_id(field_ids.group);
+ known_field_trial_ids.insert(field_ids.name);
+ }
+ }
+ } break;
+ }
+ }
+}
+
+void PersistentSystemProfile::WriteToAll(RecordType type,
+ base::StringPiece record) {
+ for (auto& allocator : allocators_)
+ allocator.Write(type, record);
+}
+
+GlobalPersistentSystemProfile* GlobalPersistentSystemProfile::GetInstance() {
+ return base::Singleton<
+ GlobalPersistentSystemProfile,
+ base::LeakySingletonTraits<GlobalPersistentSystemProfile>>::get();
+}
+
+} // namespace metrics
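
For reference, the record framing above packs a continuation bit, a 7-bit type
and a 24-bit length into a single 32-bit header, and CalculateRecordSize() pads
every record to a multiple of sizeof(RecordHeader). A few hypothetical sizes to
illustrate the arithmetic:

  sizeof(RecordHeader) == 4
  CalculateRecordSize(10)  == Align(10 + 4, 4)  == 16    // 2 bytes of padding
  CalculateRecordSize(12)  == Align(12 + 4, 4)  == 16    // already aligned
  CalculateRecordSize(100) == Align(100 + 4, 4) == 104

Write() splits a record that does not fit into the space remaining in the
current block (or that exceeds kMaxRecordSize) across several headers with the
continued bit set, and Read() reassembles the pieces; the RecordSplitting unit
test further down exercises that path.
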
diff --git a/components/metrics/persistent_system_profile.h b/components/metrics/persistent_system_profile.h
new file mode 100644
index 0000000..6c854a9
--- /dev/null
+++ b/components/metrics/persistent_system_profile.h
@@ -0,0 +1,160 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_PERSISTENT_SYSTEM_PROFILE_H_
+#define COMPONENTS_METRICS_PERSISTENT_SYSTEM_PROFILE_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/threading/thread_checker.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace base {
+template <typename T>
+struct DefaultSingletonTraits;
+class PersistentMemoryAllocator;
+} // namespace base
+
+namespace metrics {
+
+// Manages a copy of the system profile inside persistent memory segments.
+class PersistentSystemProfile {
+ public:
+ PersistentSystemProfile();
+ ~PersistentSystemProfile();
+
+ // This object can store records in multiple memory allocators.
+ void RegisterPersistentAllocator(
+ base::PersistentMemoryAllocator* memory_allocator);
+ void DeregisterPersistentAllocator(
+ base::PersistentMemoryAllocator* memory_allocator);
+
+ // Stores a complete system profile. Use the overload that takes the already
+ // serialized profile, if available, to avoid serializing twice. The
+ // |complete| flag indicates that this profile contains all known information
+ // and can replace whatever exists. If the flag is false, the profile will be
+ // stored only if there is nothing else already present.
+ void SetSystemProfile(const std::string& serialized_profile, bool complete);
+ void SetSystemProfile(const SystemProfileProto& profile, bool complete);
+
+ // Records the existence of a field trial.
+ void AddFieldTrial(base::StringPiece trial, base::StringPiece group);
+
+ // Tests if a persistent memory allocator contains a system profile.
+ static bool HasSystemProfile(
+ const base::PersistentMemoryAllocator& memory_allocator);
+
+ // Retrieves the system profile from a persistent memory allocator. Returns
+ // true if a profile was successfully retrieved. If null is passed for the
+ // |system_profile|, only a basic check for the existence of one will be
+ // done.
+ static bool GetSystemProfile(
+ const base::PersistentMemoryAllocator& memory_allocator,
+ SystemProfileProto* system_profile);
+
+ private:
+ friend class PersistentSystemProfileTest;
+
+ // Defines record types that can be stored inside our local Allocators.
+ enum RecordType : uint8_t {
+ kUnusedSpace = 0, // The default value for empty memory.
+ kSystemProfileProto,
+ kFieldTrialInfo,
+ };
+
+ // A class for managing record allocations inside a persistent memory segment.
+ class RecordAllocator {
+ public:
+ // Construct an allocator for writing.
+ RecordAllocator(base::PersistentMemoryAllocator* memory_allocator,
+ size_t min_size);
+
+ // Construct an allocator for reading.
+ RecordAllocator(const base::PersistentMemoryAllocator* memory_allocator);
+
+ // These methods manage writing records to the allocator. Do not mix these
+ // with "read" calls; it's one or the other.
+ void Reset();
+ bool Write(RecordType type, base::StringPiece record);
+
+ // Read a record from the allocator. Do not mix this with "write" calls;
+ // it's one or the other.
+ bool HasMoreData() const;
+ bool Read(RecordType* type, std::string* record) const;
+
+ base::PersistentMemoryAllocator* allocator() { return allocator_; }
+
+ bool has_complete_profile() { return has_complete_profile_; }
+ void set_complete_profile() { has_complete_profile_ = true; }
+
+ private:
+ // Advance to the next record segment in the memory allocator.
+ bool NextSegment() const;
+
+ // Advance to the next record segment, creating a new one if necessary with
+ // sufficient |min_size| space.
+ bool AddSegment(size_t min_size);
+
+ // Writes data to the current position, updating the passed values past
+ // the amount written. Returns false in case of an error.
+ bool WriteData(RecordType type, const char** data, size_t* data_size);
+
+ // Reads data from the current position, updating the passed string
+ // in-place. |type| must be initialized to kUnusedSpace and |record| must
+ // be an empty string before the first call but unchanged thereafter.
+ // Returns true when record is complete.
+ bool ReadData(RecordType* type, std::string* record) const;
+
+ // This never changes but can't be "const" because vector calls operator=().
+ base::PersistentMemoryAllocator* allocator_; // Storage location.
+
+ // Indicates if a complete profile has been stored.
+ bool has_complete_profile_;
+
+ // These change even though the underlying data may be "const".
+ mutable uint32_t alloc_reference_; // Last storage block.
+ mutable size_t alloc_size_; // Size of the block.
+ mutable size_t end_offset_; // End of data in block.
+
+ // Copy and assign are allowed for easy use with STL containers.
+ };
+
+ // Write a record to all registered allocators.
+ void WriteToAll(RecordType type, base::StringPiece record);
+
+ // Merges all "update" records into a system profile.
+ static void MergeUpdateRecords(
+ const base::PersistentMemoryAllocator& memory_allocator,
+ SystemProfileProto* system_profile);
+
+ // The list of registered persistent allocators, described by RecordAllocator
+ // instances.
+ std::vector<RecordAllocator> allocators_;
+
+ // Indicates if a complete profile has been stored to all allocators.
+ bool all_have_complete_profile_ = false;
+
+ THREAD_CHECKER(thread_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(PersistentSystemProfile);
+};
+
+// A singleton instance of the above.
+class GlobalPersistentSystemProfile : public PersistentSystemProfile {
+ public:
+ static GlobalPersistentSystemProfile* GetInstance();
+
+ private:
+ friend struct base::DefaultSingletonTraits<GlobalPersistentSystemProfile>;
+
+ GlobalPersistentSystemProfile() {}
+ ~GlobalPersistentSystemProfile() {}
+
+ DISALLOW_COPY_AND_ASSIGN(GlobalPersistentSystemProfile);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_PERSISTENT_SYSTEM_PROFILE_H_
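
A minimal usage sketch of the interface above, modeled on the unit test that
follows. The allocator size, trial name and field-trial IDs are placeholder
values.

  #include "base/metrics/persistent_memory_allocator.h"
  #include "components/metrics/persistent_system_profile.h"
  #include "third_party/metrics_proto/system_profile.pb.h"

  void PersistentSystemProfileSketch() {
    // Writer side: mirror the current profile into a persistent segment.
    base::LocalPersistentMemoryAllocator allocator(1 << 20, 0, "");
    metrics::GlobalPersistentSystemProfile::GetInstance()
        ->RegisterPersistentAllocator(&allocator);

    metrics::SystemProfileProto profile;
    metrics::SystemProfileProto::FieldTrial* trial = profile.add_field_trial();
    trial->set_name_id(123);  // Placeholder IDs; any non-empty profile works.
    trial->set_group_id(456);
    metrics::GlobalPersistentSystemProfile::GetInstance()->SetSystemProfile(
        profile, /*complete=*/true);
    metrics::GlobalPersistentSystemProfile::GetInstance()->AddFieldTrial(
        "SomeTrial", "SomeGroup");  // Placeholder trial/group names.

    // Reader side: recover the profile with later field-trial updates merged.
    metrics::SystemProfileProto recovered;
    if (metrics::PersistentSystemProfile::GetSystemProfile(allocator,
                                                           &recovered)) {
      // |recovered| holds the stored profile plus a hashed "SomeTrial" entry.
    }
  }
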
diff --git a/components/metrics/persistent_system_profile_unittest.cc b/components/metrics/persistent_system_profile_unittest.cc
new file mode 100644
index 0000000..b3a7ec6
--- /dev/null
+++ b/components/metrics/persistent_system_profile_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/persistent_system_profile.h"
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/rand_util.h"
+#include "components/variations/hashing.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+class PersistentSystemProfileTest : public testing::Test {
+ public:
+ const int32_t kAllocatorMemorySize = 1 << 20; // 1 MiB
+
+ PersistentSystemProfileTest() {}
+ ~PersistentSystemProfileTest() override {}
+
+ void SetUp() override {
+ memory_allocator_ = std::make_unique<base::LocalPersistentMemoryAllocator>(
+ kAllocatorMemorySize, 0, "");
+ records_ = std::make_unique<PersistentSystemProfile::RecordAllocator>(
+ memory_allocator_.get());
+ persistent_profile_.RegisterPersistentAllocator(memory_allocator_.get());
+ }
+
+ void TearDown() override {
+ persistent_profile_.DeregisterPersistentAllocator(memory_allocator_.get());
+ records_.reset();
+ memory_allocator_.reset();
+ }
+
+ void WriteRecord(uint8_t type, const std::string& record) {
+ persistent_profile_.allocators_[0].Write(
+ static_cast<PersistentSystemProfile::RecordType>(type), record);
+ }
+
+ bool ReadRecord(uint8_t* type, std::string* record) {
+ PersistentSystemProfile::RecordType rec_type;
+
+ bool success = records_->Read(&rec_type, record);
+ *type = rec_type; // Convert to uint8_t for testing.
+ return success;
+ }
+
+ base::PersistentMemoryAllocator* memory_allocator() {
+ return memory_allocator_.get();
+ }
+
+ PersistentSystemProfile* persistent_profile() { return &persistent_profile_; }
+
+ private:
+ PersistentSystemProfile persistent_profile_;
+ std::unique_ptr<base::PersistentMemoryAllocator> memory_allocator_;
+ std::unique_ptr<PersistentSystemProfile::RecordAllocator> records_;
+
+ DISALLOW_COPY_AND_ASSIGN(PersistentSystemProfileTest);
+};
+
+TEST_F(PersistentSystemProfileTest, Create) {
+ uint32_t type;
+ base::PersistentMemoryAllocator::Iterator iter(memory_allocator());
+ base::PersistentMemoryAllocator::Reference ref = iter.GetNext(&type);
+ DCHECK(ref);
+ DCHECK_NE(0U, type);
+}
+
+TEST_F(PersistentSystemProfileTest, RecordSplitting) {
+ const size_t kRecordSize = 100 << 10; // 100 KiB
+ std::vector<char> buffer;
+ buffer.resize(kRecordSize);
+ base::RandBytes(&buffer[0], kRecordSize);
+
+ WriteRecord(42, std::string(&buffer[0], kRecordSize));
+
+ uint8_t type;
+ std::string record;
+ ASSERT_TRUE(ReadRecord(&type, &record));
+ EXPECT_EQ(42U, type);
+ ASSERT_EQ(kRecordSize, record.size());
+ for (size_t i = 0; i < kRecordSize; ++i)
+ EXPECT_EQ(buffer[i], record[i]);
+}
+
+TEST_F(PersistentSystemProfileTest, ProfileStorage) {
+ SystemProfileProto proto1;
+ SystemProfileProto::FieldTrial* trial = proto1.add_field_trial();
+ trial->set_name_id(123);
+ trial->set_group_id(456);
+
+ persistent_profile()->SetSystemProfile(proto1, false);
+
+ SystemProfileProto proto2;
+ ASSERT_TRUE(PersistentSystemProfile::HasSystemProfile(*memory_allocator()));
+ ASSERT_TRUE(
+ PersistentSystemProfile::GetSystemProfile(*memory_allocator(), &proto2));
+ ASSERT_EQ(1, proto2.field_trial_size());
+ EXPECT_EQ(123U, proto2.field_trial(0).name_id());
+ EXPECT_EQ(456U, proto2.field_trial(0).group_id());
+
+ // Check that the profile can be overwritten.
+
+ trial = proto1.add_field_trial();
+ trial->set_name_id(78);
+ trial->set_group_id(90);
+
+ persistent_profile()->SetSystemProfile(proto1, true);
+
+ ASSERT_TRUE(
+ PersistentSystemProfile::GetSystemProfile(*memory_allocator(), &proto2));
+ ASSERT_EQ(2, proto2.field_trial_size());
+ EXPECT_EQ(123U, proto2.field_trial(0).name_id());
+ EXPECT_EQ(456U, proto2.field_trial(0).group_id());
+ EXPECT_EQ(78U, proto2.field_trial(1).name_id());
+ EXPECT_EQ(90U, proto2.field_trial(1).group_id());
+
+ // Check that the profile won't be overwritten by a new non-complete profile.
+
+ trial = proto1.add_field_trial();
+ trial->set_name_id(0xC0DE);
+ trial->set_group_id(0xFEED);
+
+ persistent_profile()->SetSystemProfile(proto1, false);
+
+ ASSERT_TRUE(
+ PersistentSystemProfile::GetSystemProfile(*memory_allocator(), &proto2));
+ ASSERT_EQ(2, proto2.field_trial_size());
+ EXPECT_EQ(123U, proto2.field_trial(0).name_id());
+ EXPECT_EQ(456U, proto2.field_trial(0).group_id());
+ EXPECT_EQ(78U, proto2.field_trial(1).name_id());
+ EXPECT_EQ(90U, proto2.field_trial(1).group_id());
+}
+
+TEST_F(PersistentSystemProfileTest, ProfileExtensions) {
+ persistent_profile()->AddFieldTrial("sna", "foo");
+
+ SystemProfileProto fetched;
+ ASSERT_FALSE(
+ PersistentSystemProfile::GetSystemProfile(*memory_allocator(), &fetched));
+
+ SystemProfileProto proto;
+ SystemProfileProto::FieldTrial* trial = proto.add_field_trial();
+ trial->set_name_id(123);
+ trial->set_group_id(456);
+
+ persistent_profile()->SetSystemProfile(proto, false);
+ ASSERT_TRUE(
+ PersistentSystemProfile::GetSystemProfile(*memory_allocator(), &fetched));
+ ASSERT_EQ(2, fetched.field_trial_size());
+ EXPECT_EQ(123U, fetched.field_trial(0).name_id());
+ EXPECT_EQ(456U, fetched.field_trial(0).group_id());
+ EXPECT_EQ(variations::HashName("sna"), fetched.field_trial(1).name_id());
+ EXPECT_EQ(variations::HashName("foo"), fetched.field_trial(1).group_id());
+
+ persistent_profile()->AddFieldTrial("foo", "bar");
+ ASSERT_TRUE(
+ PersistentSystemProfile::GetSystemProfile(*memory_allocator(), &fetched));
+ ASSERT_EQ(3, fetched.field_trial_size());
+ EXPECT_EQ(123U, fetched.field_trial(0).name_id());
+ EXPECT_EQ(456U, fetched.field_trial(0).group_id());
+ EXPECT_EQ(variations::HashName("sna"), fetched.field_trial(1).name_id());
+ EXPECT_EQ(variations::HashName("foo"), fetched.field_trial(1).group_id());
+ EXPECT_EQ(variations::HashName("foo"), fetched.field_trial(2).name_id());
+ EXPECT_EQ(variations::HashName("bar"), fetched.field_trial(2).group_id());
+}
+
+} // namespace metrics
diff --git a/components/metrics/public/cpp/BUILD.gn b/components/metrics/public/cpp/BUILD.gn
new file mode 100644
index 0000000..b9c2af5
--- /dev/null
+++ b/components/metrics/public/cpp/BUILD.gn
@@ -0,0 +1,20 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//mojo/public/tools/bindings/mojom.gni")
+
+source_set("call_stack_unit_tests") {
+ testonly = true
+ sources = [
+ "call_stack_profile_struct_traits_unittest.cc",
+ ]
+
+ deps = [
+ "//base",
+ "//components/metrics/public/interfaces:call_stack_mojo_test_bindings",
+ "//mojo/public/cpp/bindings",
+ "//testing/gtest",
+ "//third_party/metrics_proto",
+ ]
+}
diff --git a/components/metrics/public/cpp/OWNERS b/components/metrics/public/cpp/OWNERS
new file mode 100644
index 0000000..2c44a46
--- /dev/null
+++ b/components/metrics/public/cpp/OWNERS
@@ -0,0 +1,6 @@
+per-file *.mojom=set noparent
+per-file *.mojom=file://ipc/SECURITY_OWNERS
+per-file *_struct_traits*.*=set noparent
+per-file *_struct_traits*.*=file://ipc/SECURITY_OWNERS
+per-file *.typemap=set noparent
+per-file *.typemap=file://ipc/SECURITY_OWNERS
diff --git a/components/metrics/public/cpp/call_stack_profile.typemap b/components/metrics/public/cpp/call_stack_profile.typemap
new file mode 100644
index 0000000..e7fa49f
--- /dev/null
+++ b/components/metrics/public/cpp/call_stack_profile.typemap
@@ -0,0 +1,14 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom =
+ "//components/metrics/public/interfaces/call_stack_profile_collector.mojom"
+public_headers = [ "//third_party/metrics_proto/sampled_profile.pb.h" ]
+traits_headers =
+ [ "//components/metrics/public/cpp/call_stack_profile_struct_traits.h" ]
+deps = [
+ "//third_party/metrics_proto",
+]
+type_mappings =
+ [ "metrics.mojom.SampledProfile=metrics::SampledProfile[move_only]" ]
diff --git a/components/metrics/public/cpp/call_stack_profile_struct_traits.h b/components/metrics/public/cpp/call_stack_profile_struct_traits.h
new file mode 100644
index 0000000..bdcc617
--- /dev/null
+++ b/components/metrics/public/cpp/call_stack_profile_struct_traits.h
@@ -0,0 +1,48 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines StructTraits specializations for translating between mojo types and
+// metrics:: types, with data validity checks.
+
+#ifndef COMPONENTS_METRICS_PUBLIC_CPP_CALL_STACK_PROFILE_STRUCT_TRAITS_H_
+#define COMPONENTS_METRICS_PUBLIC_CPP_CALL_STACK_PROFILE_STRUCT_TRAITS_H_
+
+#include <string>
+
+#include "base/strings/string_piece.h"
+#include "components/metrics/public/interfaces/call_stack_profile_collector.mojom.h"
+#include "third_party/metrics_proto/sampled_profile.pb.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<metrics::mojom::SampledProfileDataView,
+ metrics::SampledProfile> {
+ static std::string contents(const metrics::SampledProfile& profile) {
+ std::string output;
+ profile.SerializeToString(&output);
+ return output;
+ }
+
+ static bool Read(metrics::mojom::SampledProfileDataView data,
+ metrics::SampledProfile* out) {
+ base::StringPiece contents;
+ if (!data.ReadContents(&contents))
+ return false;
+
+ if (!out->ParseFromArray(contents.data(), contents.size()))
+ return false;
+
+ // This is purely a sanity check to minimize bad data uploaded, and not
+ // required for security reasons.
+ if (!out->unknown_fields().empty())
+ return false;
+
+ return true;
+ }
+};
+
+} // namespace mojo
+
+#endif // COMPONENTS_METRICS_PUBLIC_CPP_CALL_STACK_PROFILE_STRUCT_TRAITS_H_
diff --git a/components/metrics/public/cpp/call_stack_profile_struct_traits_unittest.cc b/components/metrics/public/cpp/call_stack_profile_struct_traits_unittest.cc
new file mode 100644
index 0000000..236d619
--- /dev/null
+++ b/components/metrics/public/cpp/call_stack_profile_struct_traits_unittest.cc
@@ -0,0 +1,101 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <utility>
+
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "components/metrics/public/interfaces/call_stack_profile_collector_test.mojom.h"
+#include "mojo/public/cpp/bindings/binding.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/sampled_profile.pb.h"
+
+namespace metrics {
+
+class CallStackProfileCollectorTestImpl
+ : public mojom::CallStackProfileCollectorTest {
+ public:
+ explicit CallStackProfileCollectorTestImpl(
+ mojo::InterfaceRequest<mojom::CallStackProfileCollectorTest> request)
+ : binding_(this, std::move(request)) {
+ }
+
+ // CallStackProfileCollectorTest:
+ void BounceSampledProfile(SampledProfile in,
+ BounceSampledProfileCallback callback) override {
+ std::move(callback).Run(in);
+ }
+
+ private:
+ mojo::Binding<CallStackProfileCollectorTest> binding_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallStackProfileCollectorTestImpl);
+};
+
+class CallStackProfileStructTraitsTest : public testing::Test {
+ public:
+ CallStackProfileStructTraitsTest() : impl_(MakeRequest(&proxy_)) {}
+
+ protected:
+ base::MessageLoop message_loop_;
+ mojom::CallStackProfileCollectorTestPtr proxy_;
+ CallStackProfileCollectorTestImpl impl_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallStackProfileStructTraitsTest);
+};
+
+// Checks serialization/deserialization of SampledProfile.
+TEST_F(CallStackProfileStructTraitsTest, SampledProfile) {
+ // Construct a SampledProfile protocol buffer message.
+ SampledProfile input_proto;
+
+ CallStackProfile* proto_profile = input_proto.mutable_call_stack_profile();
+
+ CallStackProfile::Sample* proto_sample =
+ proto_profile->add_deprecated_sample();
+ proto_sample->set_count(1);
+ CallStackProfile::Location* location = proto_sample->add_frame();
+ location->set_address(0x10ULL);
+ location->set_module_id_index(0);
+
+ CallStackProfile::ModuleIdentifier* module_id =
+ proto_profile->add_module_id();
+ module_id->set_build_id("a");
+ module_id->set_name_md5_prefix(111U);
+
+ proto_profile->set_profile_duration_ms(1000);
+ proto_profile->set_sampling_period_ms(2000);
+
+ // Send the message round trip, and verify those values.
+ SampledProfile output_proto;
+ EXPECT_TRUE(
+ proxy_->BounceSampledProfile(std::move(input_proto), &output_proto));
+
+ const CallStackProfile& out_profile = output_proto.call_stack_profile();
+
+ ASSERT_EQ(1, out_profile.deprecated_sample_size());
+ ASSERT_EQ(1, out_profile.deprecated_sample(0).frame_size());
+
+ ASSERT_TRUE(out_profile.deprecated_sample(0).frame(0).has_address());
+ EXPECT_EQ(0x10ULL, out_profile.deprecated_sample(0).frame(0).address());
+
+ ASSERT_TRUE(out_profile.deprecated_sample(0).frame(0).has_module_id_index());
+ EXPECT_EQ(0, out_profile.deprecated_sample(0).frame(0).module_id_index());
+
+ ASSERT_EQ(1, out_profile.module_id().size());
+
+ ASSERT_TRUE(out_profile.module_id(0).has_build_id());
+ ASSERT_EQ("a", out_profile.module_id(0).build_id());
+
+ ASSERT_TRUE(out_profile.module_id(0).has_name_md5_prefix());
+ ASSERT_EQ(111U, out_profile.module_id(0).name_md5_prefix());
+
+ ASSERT_TRUE(out_profile.has_profile_duration_ms());
+ EXPECT_EQ(1000, out_profile.profile_duration_ms());
+
+ ASSERT_TRUE(out_profile.has_sampling_period_ms());
+ EXPECT_EQ(2000, out_profile.sampling_period_ms());
+}
+
+} // namespace metrics
diff --git a/components/metrics/public/cpp/call_stack_profile_unittest.typemap b/components/metrics/public/cpp/call_stack_profile_unittest.typemap
new file mode 100644
index 0000000..5a8cd99
--- /dev/null
+++ b/components/metrics/public/cpp/call_stack_profile_unittest.typemap
@@ -0,0 +1,18 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is necessary because, without it, compiling
+# call_stack_profile_struct_traits_unittest produces the error below:
+# "gen\third_party/metrics_proto/sampled_profile.pb.h(9,10): fatal error:
+# 'google/protobuf/stubs/common.h' file not found".
+
+mojom = "//components/metrics/public/interfaces/call_stack_profile_collector_test.mojom"
+public_headers = [ "//third_party/metrics_proto/sampled_profile.pb.h" ]
+traits_headers =
+ [ "//components/metrics/public/cpp/call_stack_profile_struct_traits.h" ]
+deps = [
+ "//third_party/metrics_proto",
+]
+type_mappings =
+ [ "metrics.mojom.SampledProfile=metrics::SampledProfile[move_only]" ]
diff --git a/components/metrics/public/cpp/typemaps.gni b/components/metrics/public/cpp/typemaps.gni
new file mode 100644
index 0000000..9e4e1db
--- /dev/null
+++ b/components/metrics/public/cpp/typemaps.gni
@@ -0,0 +1,8 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+typemaps = [
+ "//components/metrics/public/cpp/call_stack_profile.typemap",
+ "//components/metrics/public/cpp/call_stack_profile_unittest.typemap",
+]
diff --git a/components/metrics/public/interfaces/BUILD.gn b/components/metrics/public/interfaces/BUILD.gn
new file mode 100644
index 0000000..2a84979
--- /dev/null
+++ b/components/metrics/public/interfaces/BUILD.gn
@@ -0,0 +1,31 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//mojo/public/tools/bindings/mojom.gni")
+
+mojom("call_stack_mojo_bindings") {
+ sources = [
+ "call_stack_profile_collector.mojom",
+ ]
+
+ deps = [
+ "//mojo/public/mojom/base",
+ ]
+}
+
+mojom("call_stack_mojo_test_bindings") {
+ sources = [
+ "call_stack_profile_collector_test.mojom",
+ ]
+
+ deps = [
+ ":call_stack_mojo_bindings",
+ ]
+}
+
+mojom("single_sample_metrics_mojo_bindings") {
+ sources = [
+ "single_sample_metrics.mojom",
+ ]
+}
diff --git a/components/metrics/public/interfaces/OWNERS b/components/metrics/public/interfaces/OWNERS
new file mode 100644
index 0000000..1544352
--- /dev/null
+++ b/components/metrics/public/interfaces/OWNERS
@@ -0,0 +1,4 @@
+per-file *.mojom=set noparent
+per-file *.mojom=file://ipc/SECURITY_OWNERS
+per-file *_struct_traits*.*=set noparent
+per-file *_struct_traits*.*=file://ipc/SECURITY_OWNERS
diff --git a/components/metrics/public/interfaces/call_stack_profile_collector.mojom b/components/metrics/public/interfaces/call_stack_profile_collector.mojom
new file mode 100644
index 0000000..51fcf81
--- /dev/null
+++ b/components/metrics/public/interfaces/call_stack_profile_collector.mojom
@@ -0,0 +1,21 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module metrics.mojom;
+
+import "mojo/public/mojom/base/time.mojom";
+
+// |contents| is a serialized protobuf from
+// src/third_party/metrics_proto/sampled_profile.proto.
+//
+// We pass this state via serialized protobuf because that is the ultimate
+// metrics upload format.
+struct SampledProfile {
+ string contents;
+};
+
+interface CallStackProfileCollector {
+ Collect(mojo_base.mojom.TimeTicks start_timestamp,
+ SampledProfile profile);
+};
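
A hedged usage sketch of the caller side of this interface (a sketch, not part of the diff above): with the [move_only] typemap added earlier in this patch, callers hand a metrics::SampledProfile directly to Collect() and the StructTraits serialize it at the mojo boundary. The helper function name and the assumption of an already-connected CallStackProfileCollectorPtr are illustrative.

```cpp
#include <utility>

#include "base/time/time.h"
#include "components/metrics/public/interfaces/call_stack_profile_collector.mojom.h"
#include "third_party/metrics_proto/sampled_profile.pb.h"

// Sends one profile over an already-bound collector pipe (assumed to exist).
void SendProfile(metrics::mojom::CallStackProfileCollectorPtr& collector) {
  metrics::SampledProfile profile;
  profile.mutable_call_stack_profile()->set_profile_duration_ms(1000);
  collector->Collect(base::TimeTicks::Now(), std::move(profile));
}
```
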
diff --git a/components/metrics/public/interfaces/call_stack_profile_collector_test.mojom b/components/metrics/public/interfaces/call_stack_profile_collector_test.mojom
new file mode 100644
index 0000000..c7725c6
--- /dev/null
+++ b/components/metrics/public/interfaces/call_stack_profile_collector_test.mojom
@@ -0,0 +1,12 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module metrics.mojom;
+
+import "components/metrics/public/interfaces/call_stack_profile_collector.mojom";
+
+interface CallStackProfileCollectorTest {
+ [Sync]
+ BounceSampledProfile(SampledProfile in) => (SampledProfile out);
+};
diff --git a/components/metrics/public/interfaces/single_sample_metrics.mojom b/components/metrics/public/interfaces/single_sample_metrics.mojom
new file mode 100644
index 0000000..788150c
--- /dev/null
+++ b/components/metrics/public/interfaces/single_sample_metrics.mojom
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module metrics.mojom;
+
+// See components/metrics/single_sample_metrics_factory_impl.h for details.
+interface SingleSampleMetricsProvider {
+ // Returns a SingleSampleMetric.
+ //
+ // A single sample metric only reports its sample once at destruction time.
+ // The sample may be changed prior to destruction using the SetSample() method
+ // as many times as desired.
+ //
+ // See base/metrics/histograms.h for parameter definitions. |request| is the
+ // returned histogram.
+ AcquireSingleSampleMetric(string histogram_name, int32 min, int32 max,
+ uint32 bucket_count, int32 flags,
+ SingleSampleMetric& request);
+};
+
+interface SingleSampleMetric {
+ SetSample(int32 sample);
+};
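
A hedged usage sketch of the provider interface above (a sketch, not part of the diff): the histogram name, range, and the assumption of a connected SingleSampleMetricsProviderPtr are illustrative. It mirrors how SingleSampleMetricsFactoryImpl, added later in this patch, acquires metrics.

```cpp
#include "base/metrics/histogram_base.h"
#include "components/metrics/public/interfaces/single_sample_metrics.mojom.h"
#include "mojo/public/cpp/bindings/interface_request.h"

// Acquires a single-sample metric over an already-bound provider pipe (assumed).
void ReportOneShotSample(
    metrics::mojom::SingleSampleMetricsProviderPtr& provider) {
  metrics::mojom::SingleSampleMetricPtr metric;
  provider->AcquireSingleSampleMetric(
      "Media.ExampleSample", /*min=*/1, /*max=*/100, /*bucket_count=*/50,
      base::HistogramBase::kUmaTargetedHistogramFlag,
      mojo::MakeRequest(&metric));
  // SetSample() may be called repeatedly; only the last value is reported,
  // when |metric| (and its pipe) is destroyed.
  metric->SetSample(42);
}
```
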
diff --git a/components/metrics/reporting_service.cc b/components/metrics/reporting_service.cc
new file mode 100644
index 0000000..6382804
--- /dev/null
+++ b/components/metrics/reporting_service.cc
@@ -0,0 +1,214 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ReportingService handles uploading serialized logs to a server.
+
+#include "components/metrics/reporting_service.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "components/metrics/data_use_tracker.h"
+#include "components/metrics/log_store.h"
+#include "components/metrics/metrics_log_uploader.h"
+#include "components/metrics/metrics_service_client.h"
+#include "components/metrics/metrics_upload_scheduler.h"
+
+namespace metrics {
+
+// static
+void ReportingService::RegisterPrefs(PrefRegistrySimple* registry) {
+ DataUseTracker::RegisterPrefs(registry);
+}
+
+ReportingService::ReportingService(MetricsServiceClient* client,
+ PrefService* local_state,
+ size_t max_retransmit_size)
+ : client_(client),
+ max_retransmit_size_(max_retransmit_size),
+ reporting_active_(false),
+ log_upload_in_progress_(false),
+ data_use_tracker_(DataUseTracker::Create(local_state)),
+ self_ptr_factory_(this) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(client_);
+ DCHECK(local_state);
+}
+
+ReportingService::~ReportingService() {
+ DisableReporting();
+}
+
+void ReportingService::Initialize() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!upload_scheduler_);
+ log_store()->LoadPersistedUnsentLogs();
+ base::Closure send_next_log_callback = base::Bind(
+ &ReportingService::SendNextLog, self_ptr_factory_.GetWeakPtr());
+ upload_scheduler_.reset(new MetricsUploadScheduler(send_next_log_callback));
+}
+
+void ReportingService::Start() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (reporting_active_)
+ upload_scheduler_->Start();
+}
+
+void ReportingService::Stop() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (upload_scheduler_)
+ upload_scheduler_->Stop();
+}
+
+void ReportingService::EnableReporting() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (reporting_active_)
+ return;
+ reporting_active_ = true;
+ Start();
+}
+
+void ReportingService::DisableReporting() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ reporting_active_ = false;
+ Stop();
+}
+
+bool ReportingService::reporting_active() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return reporting_active_;
+}
+
+void ReportingService::UpdateMetricsUsagePrefs(const std::string& service_name,
+ int message_size,
+ bool is_cellular) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (data_use_tracker_) {
+ data_use_tracker_->UpdateMetricsUsagePrefs(service_name, message_size,
+ is_cellular);
+ }
+}
+
+//------------------------------------------------------------------------------
+// private methods
+//------------------------------------------------------------------------------
+
+void ReportingService::SendNextLog() {
+ DVLOG(1) << "SendNextLog";
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (!last_upload_finish_time_.is_null()) {
+ LogActualUploadInterval(base::TimeTicks::Now() - last_upload_finish_time_);
+ last_upload_finish_time_ = base::TimeTicks();
+ }
+ if (!reporting_active()) {
+ upload_scheduler_->StopAndUploadCancelled();
+ return;
+ }
+ if (!log_store()->has_unsent_logs()) {
+ // Should only get here if serializing the log failed somehow.
+ upload_scheduler_->Stop();
+ // Reset backoff interval
+ upload_scheduler_->UploadFinished(true);
+ return;
+ }
+ if (!log_store()->has_staged_log()) {
+ reporting_info_.set_attempt_count(0);
+ log_store()->StageNextLog();
+ }
+
+  // Proceed to send the staged log only if its size satisfies the cellular
+  // upload constraints (when cellular upload logic is enabled).
+ bool upload_canceled = false;
+ bool is_cellular_logic = client_->IsUMACellularUploadLogicEnabled();
+ if (is_cellular_logic && data_use_tracker_ &&
+ !data_use_tracker_->ShouldUploadLogOnCellular(
+ log_store()->staged_log_hash().size())) {
+ upload_scheduler_->UploadOverDataUsageCap();
+ upload_canceled = true;
+ } else {
+ SendStagedLog();
+ }
+ if (is_cellular_logic) {
+ LogCellularConstraint(upload_canceled);
+ }
+}
+
+void ReportingService::SendStagedLog() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(log_store()->has_staged_log());
+ if (!log_store()->has_staged_log())
+ return;
+
+ DCHECK(!log_upload_in_progress_);
+ log_upload_in_progress_ = true;
+
+ if (!log_uploader_) {
+ log_uploader_ = client_->CreateUploader(
+ GetUploadUrl(), GetInsecureUploadUrl(), upload_mime_type(),
+ service_type(),
+ base::Bind(&ReportingService::OnLogUploadComplete,
+ self_ptr_factory_.GetWeakPtr()));
+ }
+
+ reporting_info_.set_attempt_count(reporting_info_.attempt_count() + 1);
+
+ const std::string hash =
+ base::HexEncode(log_store()->staged_log_hash().data(),
+ log_store()->staged_log_hash().size());
+ log_uploader_->UploadLog(log_store()->staged_log(), hash, reporting_info_);
+}
+
+void ReportingService::OnLogUploadComplete(int response_code,
+ int error_code,
+ bool was_https) {
+ DVLOG(1) << "OnLogUploadComplete:" << response_code;
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(log_upload_in_progress_);
+ log_upload_in_progress_ = false;
+
+ reporting_info_.set_last_response_code(response_code);
+ reporting_info_.set_last_error_code(error_code);
+ reporting_info_.set_last_attempt_was_https(was_https);
+
+ // Log a histogram to track response success vs. failure rates.
+ LogResponseOrErrorCode(response_code, error_code, was_https);
+
+ bool upload_succeeded = response_code == 200;
+
+  // The staged log could have been removed already (e.g. by Purge() in some
+  // implementations); otherwise we may remove it here.
+ if (log_store()->has_staged_log()) {
+ // Provide boolean for error recovery (allow us to ignore response_code).
+ bool discard_log = false;
+ const size_t log_size = log_store()->staged_log().length();
+ if (upload_succeeded) {
+ LogSuccess(log_size);
+ } else if (log_size > max_retransmit_size_) {
+ LogLargeRejection(log_size);
+ discard_log = true;
+ } else if (response_code == 400) {
+ // Bad syntax. Retransmission won't work.
+ discard_log = true;
+ }
+
+ if (upload_succeeded || discard_log) {
+ log_store()->DiscardStagedLog();
+ // Store the updated list to disk now that the removed log is uploaded.
+ log_store()->PersistUnsentLogs();
+ }
+ }
+
+ // Error 400 indicates a problem with the log, not with the server, so
+ // don't consider that a sign that the server is in trouble.
+ bool server_is_healthy = upload_succeeded || response_code == 400;
+
+ if (!log_store()->has_unsent_logs()) {
+ DVLOG(1) << "Stopping upload_scheduler_.";
+ upload_scheduler_->Stop();
+ }
+ upload_scheduler_->UploadFinished(server_is_healthy);
+}
+
+} // namespace metrics
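
For readers skimming the patch, the staged-log handling in OnLogUploadComplete() above reduces to the following decision, restated here as a small standalone sketch (a restatement for clarity, not part of the diff; the names are illustrative):

```cpp
#include <cstddef>

struct UploadDecision {
  bool discard_staged_log;  // Drop the staged log instead of retrying it.
  bool server_is_healthy;   // Feeds the scheduler's backoff logic.
};

UploadDecision DecideUploadOutcome(int response_code, size_t log_size,
                                   size_t max_retransmit_size) {
  const bool succeeded = response_code == 200;
  UploadDecision d;
  d.discard_staged_log =
      succeeded ||                       // Uploaded; nothing left to keep.
      log_size > max_retransmit_size ||  // Too large to ever retransmit.
      response_code == 400;              // Bad syntax; retrying is futile.
  // HTTP 400 points at the log, not the server, so it does not count as a
  // server failure for backoff purposes.
  d.server_is_healthy = succeeded || response_code == 400;
  return d;
}
```
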
diff --git a/components/metrics/reporting_service.h b/components/metrics/reporting_service.h
new file mode 100644
index 0000000..d724430
--- /dev/null
+++ b/components/metrics/reporting_service.h
@@ -0,0 +1,153 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines a service that sends metrics logs to a server.
+
+#ifndef COMPONENTS_METRICS_REPORTING_SERVICE_H_
+#define COMPONENTS_METRICS_REPORTING_SERVICE_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "components/metrics/data_use_tracker.h"
+#include "components/metrics/metrics_log_uploader.h"
+#include "third_party/metrics_proto/reporting_info.pb.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace metrics {
+
+class LogStore;
+class MetricsUploadScheduler;
+class MetricsServiceClient;
+
+// ReportingService is an abstract class which uploads serialized logs from a
+// LogStore to a remote server. A concrete implementation of this class must
+// provide the specific LogStore and parameters for the MetricsLogUploader, and
+// can also implement hooks to record histograms based on certain events that
+// occur while attempting to upload logs.
+class ReportingService {
+ public:
+ // Creates a ReportingService with the given |client|, |local_state|, and
+ // |max_retransmit_size|. Does not take ownership of the parameters; instead
+ // it stores a weak pointer to each. Caller should ensure that the parameters
+ // are valid for the lifetime of this class.
+ ReportingService(MetricsServiceClient* client,
+ PrefService* local_state,
+ size_t max_retransmit_size);
+ virtual ~ReportingService();
+
+ // Completes setup tasks that can't be done at construction time.
+ // Loads persisted logs and creates the MetricsUploadScheduler.
+ void Initialize();
+
+ // Starts the metrics reporting system.
+  // Should be called when metrics are enabled or new logs are created.
+  // If the service is already running, this is a safe no-op.
+ void Start();
+
+ // Shuts down the metrics system. Should be called at shutdown, or if metrics
+ // are turned off.
+ void Stop();
+
+  // Enable/disable transmission of accumulated logs and crash reports (dumps).
+  // EnableReporting() also starts the upload scheduler, but sending is
+  // asynchronous, so DisableReporting() can be called immediately afterwards
+  // to prevent any uploading.
+ void EnableReporting();
+ void DisableReporting();
+
+ // True iff reporting is currently enabled.
+ bool reporting_active() const;
+
+ // Updates data usage tracking prefs with the specified values.
+ void UpdateMetricsUsagePrefs(const std::string& service_name,
+ int message_size,
+ bool is_cellular);
+
+ // Registers local state prefs used by this class. This should only be called
+ // once.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ protected:
+  MetricsServiceClient* client() const { return client_; }
+
+ private:
+ // Retrieves the log store backing this service.
+ virtual LogStore* log_store() = 0;
+
+ // Getters for MetricsLogUploader parameters.
+ virtual std::string GetUploadUrl() const = 0;
+ virtual std::string GetInsecureUploadUrl() const = 0;
+ virtual base::StringPiece upload_mime_type() const = 0;
+ virtual MetricsLogUploader::MetricServiceType service_type() const = 0;
+
+ // Methods for recording data to histograms.
+ virtual void LogActualUploadInterval(base::TimeDelta interval) {}
+ virtual void LogCellularConstraint(bool upload_canceled) {}
+ virtual void LogResponseOrErrorCode(int response_code,
+ int error_code,
+ bool was_https) {}
+ virtual void LogSuccess(size_t log_size) {}
+ virtual void LogLargeRejection(size_t log_size) {}
+
+  // If reporting is enabled, begins uploading the next completed log from
+  // the log store, staging it if necessary.
+ void SendNextLog();
+
+ // Uploads the currently staged log (which must be non-null).
+ void SendStagedLog();
+
+ // Called after transmission completes (either successfully or with failure).
+ void OnLogUploadComplete(int response_code, int error_code, bool was_https);
+
+ // Used to interact with the embedder. Weak pointer; must outlive |this|
+ // instance.
+ MetricsServiceClient* const client_;
+
+ // Largest log size to attempt to retransmit.
+ size_t max_retransmit_size_;
+
+  // Indicates whether reporting is currently enabled. This should not be set
+  // directly; use EnableReporting() and DisableReporting(), which also start
+  // or stop the upload scheduler.
+ bool reporting_active_;
+
+ // Instance of the helper class for uploading logs.
+ std::unique_ptr<MetricsLogUploader> log_uploader_;
+
+ // Whether there is a current log upload in progress.
+ bool log_upload_in_progress_;
+
+ // The scheduler for determining when uploads should happen.
+ std::unique_ptr<MetricsUploadScheduler> upload_scheduler_;
+
+  // Tracks data usage of metrics uploads and updates the related prefs.
+ std::unique_ptr<DataUseTracker> data_use_tracker_;
+
+  // The tick count of the last time a log upload finished; null if no upload
+  // has been done yet.
+ base::TimeTicks last_upload_finish_time_;
+
+ // Info on current reporting state to send along with reports.
+ ReportingInfo reporting_info_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+
+  // Weak pointer factory used when posting tasks to other threads. All weak
+  // pointers managed by this factory have the same lifetime as this
+  // ReportingService.
+ base::WeakPtrFactory<ReportingService> self_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReportingService);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_REPORTING_SERVICE_H_
diff --git a/components/metrics/reporting_service_unittest.cc b/components/metrics/reporting_service_unittest.cc
new file mode 100644
index 0000000..fb838cb
--- /dev/null
+++ b/components/metrics/reporting_service_unittest.cc
@@ -0,0 +1,142 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/reporting_service.h"
+
+#include <stdint.h>
+
+#include <deque>
+#include <memory>
+#include <string>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/sha1.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "components/metrics/log_store.h"
+#include "components/metrics/test_metrics_service_client.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/zlib/google/compression_utils.h"
+
+namespace metrics {
+
+namespace {
+
+const char kTestUploadUrl[] = "test_url";
+const char kTestMimeType[] = "test_mime_type";
+
+class TestLogStore : public LogStore {
+ public:
+ TestLogStore() {}
+ ~TestLogStore() {}
+
+ void AddLog(const std::string& log) { logs_.push_back(log); }
+
+ // LogStore:
+ bool has_unsent_logs() const override { return !logs_.empty(); }
+ bool has_staged_log() const override { return !staged_log_hash_.empty(); }
+ const std::string& staged_log() const override { return logs_.front(); }
+ const std::string& staged_log_hash() const override {
+ return staged_log_hash_;
+ }
+ void StageNextLog() override {
+ if (has_unsent_logs())
+ staged_log_hash_ = base::SHA1HashString(logs_.front());
+ }
+ void DiscardStagedLog() override {
+ if (!has_staged_log())
+ return;
+ logs_.pop_front();
+ staged_log_hash_.clear();
+ }
+ void PersistUnsentLogs() const override {}
+ void LoadPersistedUnsentLogs() override {}
+
+ private:
+ std::string staged_log_hash_;
+ std::deque<std::string> logs_;
+};
+
+class TestReportingService : public ReportingService {
+ public:
+ TestReportingService(MetricsServiceClient* client, PrefService* local_state)
+ : ReportingService(client, local_state, 100) {
+ Initialize();
+ }
+ ~TestReportingService() override {}
+
+ void AddLog(const std::string& log) { log_store_.AddLog(log); }
+
+ private:
+ // ReportingService:
+ LogStore* log_store() override { return &log_store_; }
+ std::string GetUploadUrl() const override { return kTestUploadUrl; }
+ std::string GetInsecureUploadUrl() const override { return kTestUploadUrl; }
+ base::StringPiece upload_mime_type() const override { return kTestMimeType; }
+ MetricsLogUploader::MetricServiceType service_type() const override {
+ return MetricsLogUploader::MetricServiceType::UMA;
+ }
+
+ TestLogStore log_store_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestReportingService);
+};
+
+class ReportingServiceTest : public testing::Test {
+ public:
+ ReportingServiceTest()
+ : task_runner_(new base::TestSimpleTaskRunner),
+ task_runner_handle_(task_runner_) {
+ ReportingService::RegisterPrefs(testing_local_state_.registry());
+ }
+
+ ~ReportingServiceTest() override {}
+
+ PrefService* GetLocalState() { return &testing_local_state_; }
+
+ protected:
+ scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
+ base::ThreadTaskRunnerHandle task_runner_handle_;
+ TestMetricsServiceClient client_;
+
+ private:
+ TestingPrefServiceSimple testing_local_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReportingServiceTest);
+};
+
+} // namespace
+
+TEST_F(ReportingServiceTest, BasicTest) {
+ TestReportingService service(&client_, GetLocalState());
+ service.AddLog("log1");
+ service.AddLog("log2");
+
+ service.EnableReporting();
+ task_runner_->RunPendingTasks();
+  // The first staged log should now be uploading.
+ EXPECT_TRUE(client_.uploader()->is_uploading());
+ EXPECT_EQ(1, client_.uploader()->reporting_info().attempt_count());
+ EXPECT_FALSE(client_.uploader()->reporting_info().has_last_response_code());
+
+ client_.uploader()->CompleteUpload(404);
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client_.uploader()->is_uploading());
+ EXPECT_EQ(2, client_.uploader()->reporting_info().attempt_count());
+ EXPECT_EQ(404, client_.uploader()->reporting_info().last_response_code());
+
+ client_.uploader()->CompleteUpload(200);
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client_.uploader()->is_uploading());
+ EXPECT_EQ(1, client_.uploader()->reporting_info().attempt_count());
+ EXPECT_EQ(200, client_.uploader()->reporting_info().last_response_code());
+
+ client_.uploader()->CompleteUpload(200);
+ EXPECT_EQ(0U, task_runner_->NumPendingTasks());
+ EXPECT_FALSE(client_.uploader()->is_uploading());
+}
+
+} // namespace metrics
diff --git a/components/metrics/serialization/metric_sample.cc b/components/metrics/serialization/metric_sample.cc
new file mode 100644
index 0000000..24f3757
--- /dev/null
+++ b/components/metrics/serialization/metric_sample.cc
@@ -0,0 +1,196 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/serialization/metric_sample.h"
+
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/stringprintf.h"
+
+namespace metrics {
+
+MetricSample::MetricSample(MetricSample::SampleType sample_type,
+ const std::string& metric_name,
+ int sample,
+ int min,
+ int max,
+ int bucket_count)
+ : type_(sample_type),
+ name_(metric_name),
+ sample_(sample),
+ min_(min),
+ max_(max),
+ bucket_count_(bucket_count) {
+}
+
+MetricSample::~MetricSample() {
+}
+
+bool MetricSample::IsValid() const {
+ return name().find(' ') == std::string::npos &&
+ name().find('\0') == std::string::npos && !name().empty();
+}
+
+std::string MetricSample::ToString() const {
+ if (type_ == CRASH) {
+ return base::StringPrintf("crash%c%s%c",
+ '\0',
+ name().c_str(),
+ '\0');
+ }
+ if (type_ == SPARSE_HISTOGRAM) {
+ return base::StringPrintf("sparsehistogram%c%s %d%c",
+ '\0',
+ name().c_str(),
+ sample_,
+ '\0');
+ }
+ if (type_ == LINEAR_HISTOGRAM) {
+ return base::StringPrintf("linearhistogram%c%s %d %d%c",
+ '\0',
+ name().c_str(),
+ sample_,
+ max_,
+ '\0');
+ }
+ if (type_ == HISTOGRAM) {
+ return base::StringPrintf("histogram%c%s %d %d %d %d%c",
+ '\0',
+ name().c_str(),
+ sample_,
+ min_,
+ max_,
+ bucket_count_,
+ '\0');
+ }
+ // The type can only be USER_ACTION.
+ CHECK_EQ(type_, USER_ACTION);
+ return base::StringPrintf("useraction%c%s%c", '\0', name().c_str(), '\0');
+}
+
+int MetricSample::sample() const {
+ CHECK_NE(type_, USER_ACTION);
+ CHECK_NE(type_, CRASH);
+ return sample_;
+}
+
+int MetricSample::min() const {
+ CHECK_EQ(type_, HISTOGRAM);
+ return min_;
+}
+
+int MetricSample::max() const {
+ CHECK_NE(type_, CRASH);
+ CHECK_NE(type_, USER_ACTION);
+ CHECK_NE(type_, SPARSE_HISTOGRAM);
+ return max_;
+}
+
+int MetricSample::bucket_count() const {
+ CHECK_EQ(type_, HISTOGRAM);
+ return bucket_count_;
+}
+
+// static
+std::unique_ptr<MetricSample> MetricSample::CrashSample(
+ const std::string& crash_name) {
+ return std::unique_ptr<MetricSample>(
+ new MetricSample(CRASH, crash_name, 0, 0, 0, 0));
+}
+
+// static
+std::unique_ptr<MetricSample> MetricSample::HistogramSample(
+ const std::string& histogram_name,
+ int sample,
+ int min,
+ int max,
+ int bucket_count) {
+ return std::unique_ptr<MetricSample>(new MetricSample(
+ HISTOGRAM, histogram_name, sample, min, max, bucket_count));
+}
+
+// static
+std::unique_ptr<MetricSample> MetricSample::ParseHistogram(
+ const std::string& serialized_histogram) {
+ std::vector<base::StringPiece> parts = base::SplitStringPiece(
+ serialized_histogram, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+
+ if (parts.size() != 5)
+ return std::unique_ptr<MetricSample>();
+ int sample, min, max, bucket_count;
+ if (parts[0].empty() || !base::StringToInt(parts[1], &sample) ||
+ !base::StringToInt(parts[2], &min) ||
+ !base::StringToInt(parts[3], &max) ||
+ !base::StringToInt(parts[4], &bucket_count)) {
+ return std::unique_ptr<MetricSample>();
+ }
+
+ return HistogramSample(parts[0].as_string(), sample, min, max, bucket_count);
+}
+
+// static
+std::unique_ptr<MetricSample> MetricSample::SparseHistogramSample(
+ const std::string& histogram_name,
+ int sample) {
+ return std::unique_ptr<MetricSample>(
+ new MetricSample(SPARSE_HISTOGRAM, histogram_name, sample, 0, 0, 0));
+}
+
+// static
+std::unique_ptr<MetricSample> MetricSample::ParseSparseHistogram(
+ const std::string& serialized_histogram) {
+ std::vector<base::StringPiece> parts = base::SplitStringPiece(
+ serialized_histogram, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ if (parts.size() != 2)
+ return std::unique_ptr<MetricSample>();
+ int sample;
+ if (parts[0].empty() || !base::StringToInt(parts[1], &sample))
+ return std::unique_ptr<MetricSample>();
+
+ return SparseHistogramSample(parts[0].as_string(), sample);
+}
+
+// static
+std::unique_ptr<MetricSample> MetricSample::LinearHistogramSample(
+ const std::string& histogram_name,
+ int sample,
+ int max) {
+ return std::unique_ptr<MetricSample>(
+ new MetricSample(LINEAR_HISTOGRAM, histogram_name, sample, 0, max, 0));
+}
+
+// static
+std::unique_ptr<MetricSample> MetricSample::ParseLinearHistogram(
+ const std::string& serialized_histogram) {
+ std::vector<base::StringPiece> parts = base::SplitStringPiece(
+ serialized_histogram, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ int sample, max;
+ if (parts.size() != 3)
+ return std::unique_ptr<MetricSample>();
+ if (parts[0].empty() || !base::StringToInt(parts[1], &sample) ||
+ !base::StringToInt(parts[2], &max)) {
+ return std::unique_ptr<MetricSample>();
+ }
+
+ return LinearHistogramSample(parts[0].as_string(), sample, max);
+}
+
+// static
+std::unique_ptr<MetricSample> MetricSample::UserActionSample(
+ const std::string& action_name) {
+ return std::unique_ptr<MetricSample>(
+ new MetricSample(USER_ACTION, action_name, 0, 0, 0, 0));
+}
+
+bool MetricSample::IsEqual(const MetricSample& metric) {
+ return type_ == metric.type_ && name_ == metric.name_ &&
+ sample_ == metric.sample_ && min_ == metric.min_ &&
+ max_ == metric.max_ && bucket_count_ == metric.bucket_count_;
+}
+
+} // namespace metrics
diff --git a/components/metrics/serialization/metric_sample.h b/components/metrics/serialization/metric_sample.h
new file mode 100644
index 0000000..5a64c41
--- /dev/null
+++ b/components/metrics/serialization/metric_sample.h
@@ -0,0 +1,118 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_SERIALIZATION_METRIC_SAMPLE_H_
+#define COMPONENTS_METRICS_SERIALIZATION_METRIC_SAMPLE_H_
+
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+
+namespace metrics {
+
+// This class is used by libmetrics (ChromeOS) to serialize
+// and deserialize measurements to send them to a metrics sending service.
+// It is meant to be a simple container with serialization functions.
+class MetricSample {
+ public:
+ // Types of metric sample used.
+ enum SampleType {
+ CRASH,
+ HISTOGRAM,
+ LINEAR_HISTOGRAM,
+ SPARSE_HISTOGRAM,
+ USER_ACTION
+ };
+
+ ~MetricSample();
+
+ // Returns true if the sample is valid (can be serialized without ambiguity).
+ //
+ // This function should be used to filter bad samples before serializing them.
+ bool IsValid() const;
+
+ // Getters for type and name. All types of metrics have these so we do not
+ // need to check the type.
+ SampleType type() const { return type_; }
+ const std::string& name() const { return name_; }
+
+ // Getters for sample, min, max, bucket_count.
+  // Check the metric type to make sure the request makes sense. (For example,
+  // a crash sample does not have a bucket_count, so calling bucket_count() on
+  // it will CHECK-fail.)
+ int sample() const;
+ int min() const;
+ int max() const;
+ int bucket_count() const;
+
+ // Returns a serialized version of the sample.
+ //
+ // The serialized message for each type is:
+ // crash: crash\0|name_|\0
+ // user action: useraction\0|name_|\0
+ // histogram: histogram\0|name_| |sample_| |min_| |max_| |bucket_count_|\0
+ // sparsehistogram: sparsehistogram\0|name_| |sample_|\0
+ // linearhistogram: linearhistogram\0|name_| |sample_| |max_|\0
+ std::string ToString() const;
+
+ // Builds a crash sample.
+ static std::unique_ptr<MetricSample> CrashSample(
+ const std::string& crash_name);
+
+ // Builds a histogram sample.
+ static std::unique_ptr<MetricSample> HistogramSample(
+ const std::string& histogram_name,
+ int sample,
+ int min,
+ int max,
+ int bucket_count);
+ // Deserializes a histogram sample.
+ static std::unique_ptr<MetricSample> ParseHistogram(
+ const std::string& serialized);
+
+ // Builds a sparse histogram sample.
+ static std::unique_ptr<MetricSample> SparseHistogramSample(
+ const std::string& histogram_name,
+ int sample);
+ // Deserializes a sparse histogram sample.
+ static std::unique_ptr<MetricSample> ParseSparseHistogram(
+ const std::string& serialized);
+
+ // Builds a linear histogram sample.
+ static std::unique_ptr<MetricSample>
+ LinearHistogramSample(const std::string& histogram_name, int sample, int max);
+ // Deserializes a linear histogram sample.
+ static std::unique_ptr<MetricSample> ParseLinearHistogram(
+ const std::string& serialized);
+
+ // Builds a user action sample.
+ static std::unique_ptr<MetricSample> UserActionSample(
+ const std::string& action_name);
+
+ // Returns true if sample and this object represent the same sample (type,
+ // name, sample, min, max, bucket_count match).
+ bool IsEqual(const MetricSample& sample);
+
+ private:
+ MetricSample(SampleType sample_type,
+ const std::string& metric_name,
+ const int sample,
+ const int min,
+ const int max,
+ const int bucket_count);
+
+ const SampleType type_;
+ const std::string name_;
+ const int sample_;
+ const int min_;
+ const int max_;
+ const int bucket_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricSample);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_SERIALIZATION_METRIC_SAMPLE_H_
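
A worked example of the serialization format documented above (a sketch, not part of the diff): a crash sample named "mycrash" serializes to "crash\0mycrash\0", i.e. 5 + 1 + 7 + 1 = 14 bytes; the 4-byte length prefix added by SerializationUtils::WriteMetricToFile brings the on-disk record to 18 bytes, which is what the MessageSeparatedByZero test later in this patch checks.

```cpp
#include <cassert>
#include <cstdint>
#include <string>

int main() {
  // The (pointer, length) constructor is required because of embedded NULs.
  const std::string serialized("crash\0mycrash\0", 14);
  assert(serialized.size() == 14);
  assert(serialized.back() == '\0');
  // 4-byte length prefix + 14-byte payload = 18 bytes on disk.
  assert(sizeof(int32_t) + serialized.size() == 18);
  return 0;
}
```
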
diff --git a/components/metrics/serialization/serialization_utils.cc b/components/metrics/serialization/serialization_utils.cc
new file mode 100644
index 0000000..7214b5d
--- /dev/null
+++ b/components/metrics/serialization/serialization_utils.cc
@@ -0,0 +1,219 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/serialization/serialization_utils.h"
+
+#include <errno.h>
+#include <stdint.h>
+#include <sys/file.h>
+
+#include <utility>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "components/metrics/serialization/metric_sample.h"
+
+#define READ_WRITE_ALL_FILE_FLAGS \
+ (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)
+
+namespace metrics {
+namespace {
+
+// Reads the next message from |file_descriptor| into |message|.
+//
+// |message| will be set to the empty string if no message could be read (EOF)
+// or the message was badly constructed.
+//
+// Returns false if no message can be read from this file anymore (EOF or
+// unrecoverable error).
+bool ReadMessage(int fd, std::string* message) {
+ CHECK(message);
+
+ int result;
+ int32_t message_size;
+ const int32_t message_header_size = sizeof(message_size);
+ // The file containing the metrics does not leave the device so the writer and
+ // the reader will always have the same endianness.
+ result = HANDLE_EINTR(read(fd, &message_size, message_header_size));
+ if (result < 0) {
+ DPLOG(ERROR) << "reading metrics message header";
+ return false;
+ }
+ if (result == 0) {
+ // This indicates a normal EOF.
+ return false;
+ }
+ if (result < message_header_size) {
+ DLOG(ERROR) << "bad read size " << result << ", expecting "
+ << sizeof(message_size);
+ return false;
+ }
+
+ // kMessageMaxLength applies to the entire message: the 4-byte
+ // length field and the content.
+ if (message_size > SerializationUtils::kMessageMaxLength) {
+ DLOG(ERROR) << "message too long : " << message_size;
+ if (HANDLE_EINTR(lseek(fd, message_size - 4, SEEK_CUR)) == -1) {
+ DLOG(ERROR) << "error while skipping message. abort";
+ return false;
+ }
+ // Badly formatted message was skipped. Treat the badly formatted sample as
+ // an empty sample.
+ message->clear();
+ return true;
+ }
+
+ if (message_size < message_header_size) {
+ DLOG(ERROR) << "message too short : " << message_size;
+ return false;
+ }
+
+ message_size -= message_header_size; // The message size includes itself.
+ char buffer[SerializationUtils::kMessageMaxLength];
+ if (!base::ReadFromFD(fd, buffer, message_size)) {
+ DPLOG(ERROR) << "reading metrics message body";
+ return false;
+ }
+ *message = std::string(buffer, message_size);
+ return true;
+}
+
+} // namespace
+
+std::unique_ptr<MetricSample> SerializationUtils::ParseSample(
+ const std::string& sample) {
+ if (sample.empty())
+ return std::unique_ptr<MetricSample>();
+
+ std::vector<std::string> parts = base::SplitString(
+ sample, std::string(1, '\0'),
+ base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ // We should have two null terminated strings so split should produce
+ // three chunks.
+ if (parts.size() != 3) {
+ DLOG(ERROR) << "splitting message on \\0 produced " << parts.size()
+ << " parts (expected 3)";
+ return std::unique_ptr<MetricSample>();
+ }
+ const std::string& name = parts[0];
+ const std::string& value = parts[1];
+
+ if (base::LowerCaseEqualsASCII(name, "crash"))
+ return MetricSample::CrashSample(value);
+ if (base::LowerCaseEqualsASCII(name, "histogram"))
+ return MetricSample::ParseHistogram(value);
+ if (base::LowerCaseEqualsASCII(name, "linearhistogram"))
+ return MetricSample::ParseLinearHistogram(value);
+ if (base::LowerCaseEqualsASCII(name, "sparsehistogram"))
+ return MetricSample::ParseSparseHistogram(value);
+ if (base::LowerCaseEqualsASCII(name, "useraction"))
+ return MetricSample::UserActionSample(value);
+ DLOG(ERROR) << "invalid event type: " << name << ", value: " << value;
+ return std::unique_ptr<MetricSample>();
+}
+
+void SerializationUtils::ReadAndTruncateMetricsFromFile(
+ const std::string& filename,
+ std::vector<std::unique_ptr<MetricSample>>* metrics) {
+ struct stat stat_buf;
+ int result;
+
+ result = stat(filename.c_str(), &stat_buf);
+ if (result < 0) {
+ if (errno != ENOENT)
+ DPLOG(ERROR) << "bad metrics file stat: " << filename;
+
+ // Nothing to collect---try later.
+ return;
+ }
+ if (stat_buf.st_size == 0) {
+ // Also nothing to collect.
+ return;
+ }
+ base::ScopedFD fd(open(filename.c_str(), O_RDWR));
+ if (fd.get() < 0) {
+ DPLOG(ERROR) << "cannot open: " << filename;
+ return;
+ }
+ result = flock(fd.get(), LOCK_EX);
+ if (result < 0) {
+ DPLOG(ERROR) << "cannot lock: " << filename;
+ return;
+ }
+
+ // This processes all messages in the log. When all messages are
+ // read and processed, or an error occurs, truncate the file to zero size.
+ for (;;) {
+ std::string message;
+
+ if (!ReadMessage(fd.get(), &message))
+ break;
+
+ std::unique_ptr<MetricSample> sample = ParseSample(message);
+ if (sample)
+ metrics->push_back(std::move(sample));
+ }
+
+ result = ftruncate(fd.get(), 0);
+ if (result < 0)
+ DPLOG(ERROR) << "truncate metrics log: " << filename;
+
+ result = flock(fd.get(), LOCK_UN);
+ if (result < 0)
+ DPLOG(ERROR) << "unlock metrics log: " << filename;
+}
+
+bool SerializationUtils::WriteMetricToFile(const MetricSample& sample,
+ const std::string& filename) {
+ if (!sample.IsValid())
+ return false;
+
+ base::ScopedFD file_descriptor(open(filename.c_str(),
+ O_WRONLY | O_APPEND | O_CREAT,
+ READ_WRITE_ALL_FILE_FLAGS));
+
+ if (file_descriptor.get() < 0) {
+ DPLOG(ERROR) << "error opening the file: " << filename;
+ return false;
+ }
+
+ fchmod(file_descriptor.get(), READ_WRITE_ALL_FILE_FLAGS);
+  // Grab a lock to avoid Chrome truncating the file underneath us. Keep the
+  // file locked as briefly as possible. Freeing |file_descriptor| will close
+  // the file and remove the lock.
+ if (HANDLE_EINTR(flock(file_descriptor.get(), LOCK_EX)) < 0) {
+ DPLOG(ERROR) << "error locking: " << filename;
+ return false;
+ }
+
+ std::string msg = sample.ToString();
+ int32_t size = msg.length() + sizeof(int32_t);
+ if (size > kMessageMaxLength) {
+ DPLOG(ERROR) << "cannot write message: too long: " << filename;
+ return false;
+ }
+
+ // The file containing the metrics samples will only be read by programs on
+ // the same device so we do not check endianness.
+ if (!base::WriteFileDescriptor(file_descriptor.get(),
+ reinterpret_cast<char*>(&size),
+ sizeof(size))) {
+ DPLOG(ERROR) << "error writing message length: " << filename;
+ return false;
+ }
+
+ if (!base::WriteFileDescriptor(
+ file_descriptor.get(), msg.c_str(), msg.size())) {
+ DPLOG(ERROR) << "error writing message: " << filename;
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace metrics
diff --git a/components/metrics/serialization/serialization_utils.h b/components/metrics/serialization/serialization_utils.h
new file mode 100644
index 0000000..c741cb2
--- /dev/null
+++ b/components/metrics/serialization/serialization_utils.h
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_SERIALIZATION_SERIALIZATION_UTILS_H_
+#define COMPONENTS_METRICS_SERIALIZATION_SERIALIZATION_UTILS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace metrics {
+
+class MetricSample;
+
+// Metrics helpers to serialize and deserialize metrics collected by
+// ChromeOS.
+namespace SerializationUtils {
+
+// Deserializes a sample passed as a string and returns it.
+// The return value is either a unique_ptr to a MetricSample (if
+// deserialization was successful) or a null unique_ptr.
+std::unique_ptr<MetricSample> ParseSample(const std::string& sample);
+
+// Reads all samples from a file and truncates the file when done.
+void ReadAndTruncateMetricsFromFile(
+ const std::string& filename,
+ std::vector<std::unique_ptr<MetricSample>>* metrics);
+
+// Serializes a sample and writes it to |filename|.
+// The format for the message is:
+// message_size, serialized_message
+// where
+// * message_size is the total length of the message (message_size +
+//   serialized_message), stored as a 4-byte integer
+// * serialized_message is the serialized version of sample (using ToString)
+//
+// NB: the file will never leave the device so message_size will be written
+// with the architecture's endianness.
+bool WriteMetricToFile(const MetricSample& sample, const std::string& filename);
+
+// Maximum length of a serialized message
+static const int kMessageMaxLength = 1024;
+
+} // namespace SerializationUtils
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_SERIALIZATION_SERIALIZATION_UTILS_H_
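
A minimal end-to-end usage sketch of the two entry points above (a sketch, not part of the diff). The events file path and histogram name are assumptions for illustration; only calls declared in this header and in metric_sample.h are used.

```cpp
#include <memory>
#include <string>
#include <vector>

#include "components/metrics/serialization/metric_sample.h"
#include "components/metrics/serialization/serialization_utils.h"

void Example() {
  // The path is an assumption for illustration, not mandated by this API.
  const std::string kEventsFile = "/var/lib/metrics/uma-events";

  // Producer side (e.g. a ChromeOS daemon): append one serialized sample.
  std::unique_ptr<metrics::MetricSample> sample =
      metrics::MetricSample::SparseHistogramSample("Platform.ExampleSparse", 3);
  metrics::SerializationUtils::WriteMetricToFile(*sample, kEventsFile);

  // Consumer side (Chrome): drain everything accumulated so far and truncate.
  std::vector<std::unique_ptr<metrics::MetricSample>> samples;
  metrics::SerializationUtils::ReadAndTruncateMetricsFromFile(kEventsFile,
                                                              &samples);
}
```
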
diff --git a/components/metrics/serialization/serialization_utils_unittest.cc b/components/metrics/serialization/serialization_utils_unittest.cc
new file mode 100644
index 0000000..5685a1f
--- /dev/null
+++ b/components/metrics/serialization/serialization_utils_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/serialization/serialization_utils.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "components/metrics/serialization/metric_sample.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+namespace {
+
+class SerializationUtilsTest : public testing::Test {
+ protected:
+ SerializationUtilsTest() {
+ bool success = temporary_dir.CreateUniqueTempDir();
+ if (success) {
+ base::FilePath dir_path = temporary_dir.GetPath();
+ filename = dir_path.value() + "chromeossampletest";
+ filepath = base::FilePath(filename);
+ }
+ }
+
+ void SetUp() override { base::DeleteFile(filepath, false); }
+
+ void TestSerialization(MetricSample* sample) {
+ std::string serialized(sample->ToString());
+ ASSERT_EQ('\0', serialized.back());
+ std::unique_ptr<MetricSample> deserialized =
+ SerializationUtils::ParseSample(serialized);
+ ASSERT_TRUE(deserialized);
+ EXPECT_TRUE(sample->IsEqual(*deserialized.get()));
+ }
+
+ std::string filename;
+ base::ScopedTempDir temporary_dir;
+ base::FilePath filepath;
+};
+
+TEST_F(SerializationUtilsTest, CrashSerializeTest) {
+ TestSerialization(MetricSample::CrashSample("test").get());
+}
+
+TEST_F(SerializationUtilsTest, HistogramSerializeTest) {
+ TestSerialization(
+ MetricSample::HistogramSample("myhist", 13, 1, 100, 10).get());
+}
+
+TEST_F(SerializationUtilsTest, LinearSerializeTest) {
+ TestSerialization(
+ MetricSample::LinearHistogramSample("linearhist", 12, 30).get());
+}
+
+TEST_F(SerializationUtilsTest, SparseSerializeTest) {
+ TestSerialization(MetricSample::SparseHistogramSample("mysparse", 30).get());
+}
+
+TEST_F(SerializationUtilsTest, UserActionSerializeTest) {
+ TestSerialization(MetricSample::UserActionSample("myaction").get());
+}
+
+TEST_F(SerializationUtilsTest, IllegalNameAreFilteredTest) {
+ std::unique_ptr<MetricSample> sample1 =
+ MetricSample::SparseHistogramSample("no space", 10);
+ std::unique_ptr<MetricSample> sample2 = MetricSample::LinearHistogramSample(
+ base::StringPrintf("here%cbhe", '\0'), 1, 3);
+
+ EXPECT_FALSE(SerializationUtils::WriteMetricToFile(*sample1.get(), filename));
+ EXPECT_FALSE(SerializationUtils::WriteMetricToFile(*sample2.get(), filename));
+ int64_t size = 0;
+
+ ASSERT_TRUE(!PathExists(filepath) || base::GetFileSize(filepath, &size));
+
+ EXPECT_EQ(0, size);
+}
+
+TEST_F(SerializationUtilsTest, BadInputIsCaughtTest) {
+ std::string input(
+ base::StringPrintf("sparsehistogram%cname foo%c", '\0', '\0'));
+ EXPECT_EQ(nullptr, MetricSample::ParseSparseHistogram(input).get());
+}
+
+TEST_F(SerializationUtilsTest, MessageSeparatedByZero) {
+ std::unique_ptr<MetricSample> crash = MetricSample::CrashSample("mycrash");
+
+ SerializationUtils::WriteMetricToFile(*crash.get(), filename);
+ int64_t size = 0;
+ ASSERT_TRUE(base::GetFileSize(filepath, &size));
+ // 4 bytes for the size
+ // 5 bytes for crash
+ // 7 bytes for mycrash
+ // 2 bytes for the \0
+ // -> total of 18
+ EXPECT_EQ(size, 18);
+}
+
+TEST_F(SerializationUtilsTest, MessagesTooLongAreDiscardedTest) {
+ // Creates a message that is bigger than the maximum allowed size.
+ // As we are adding extra character (crash, \0s, etc), if the name is
+ // kMessageMaxLength long, it will be too long.
+ std::string name(SerializationUtils::kMessageMaxLength, 'c');
+
+ std::unique_ptr<MetricSample> crash = MetricSample::CrashSample(name);
+ EXPECT_FALSE(SerializationUtils::WriteMetricToFile(*crash.get(), filename));
+ int64_t size = 0;
+ ASSERT_TRUE(base::GetFileSize(filepath, &size));
+ EXPECT_EQ(0, size);
+}
+
+TEST_F(SerializationUtilsTest, ReadLongMessageTest) {
+ base::File test_file(filepath,
+ base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_APPEND);
+ std::string message(SerializationUtils::kMessageMaxLength + 1, 'c');
+
+ int32_t message_size = message.length() + sizeof(int32_t);
+ test_file.WriteAtCurrentPos(reinterpret_cast<const char*>(&message_size),
+ sizeof(message_size));
+ test_file.WriteAtCurrentPos(message.c_str(), message.length());
+ test_file.Close();
+
+ std::unique_ptr<MetricSample> crash = MetricSample::CrashSample("test");
+ SerializationUtils::WriteMetricToFile(*crash.get(), filename);
+
+ std::vector<std::unique_ptr<MetricSample>> samples;
+ SerializationUtils::ReadAndTruncateMetricsFromFile(filename, &samples);
+ ASSERT_EQ(size_t(1), samples.size());
+ ASSERT_TRUE(samples[0].get() != nullptr);
+ EXPECT_TRUE(crash->IsEqual(*samples[0]));
+}
+
+TEST_F(SerializationUtilsTest, WriteReadTest) {
+ std::unique_ptr<MetricSample> hist =
+ MetricSample::HistogramSample("myhist", 1, 2, 3, 4);
+ std::unique_ptr<MetricSample> crash = MetricSample::CrashSample("mycrash");
+ std::unique_ptr<MetricSample> lhist =
+ MetricSample::LinearHistogramSample("linear", 1, 10);
+ std::unique_ptr<MetricSample> shist =
+ MetricSample::SparseHistogramSample("mysparse", 30);
+ std::unique_ptr<MetricSample> action =
+ MetricSample::UserActionSample("myaction");
+
+ SerializationUtils::WriteMetricToFile(*hist.get(), filename);
+ SerializationUtils::WriteMetricToFile(*crash.get(), filename);
+ SerializationUtils::WriteMetricToFile(*lhist.get(), filename);
+ SerializationUtils::WriteMetricToFile(*shist.get(), filename);
+ SerializationUtils::WriteMetricToFile(*action.get(), filename);
+ std::vector<std::unique_ptr<MetricSample>> vect;
+ SerializationUtils::ReadAndTruncateMetricsFromFile(filename, &vect);
+ ASSERT_EQ(vect.size(), size_t(5));
+ for (auto& sample : vect) {
+ ASSERT_NE(nullptr, sample.get());
+ }
+ EXPECT_TRUE(hist->IsEqual(*vect[0]));
+ EXPECT_TRUE(crash->IsEqual(*vect[1]));
+ EXPECT_TRUE(lhist->IsEqual(*vect[2]));
+ EXPECT_TRUE(shist->IsEqual(*vect[3]));
+ EXPECT_TRUE(action->IsEqual(*vect[4]));
+
+ int64_t size = 0;
+ ASSERT_TRUE(base::GetFileSize(filepath, &size));
+ ASSERT_EQ(0, size);
+}
+
+} // namespace
+} // namespace metrics
diff --git a/components/metrics/single_sample_metrics.cc b/components/metrics/single_sample_metrics.cc
new file mode 100644
index 0000000..201ef73
--- /dev/null
+++ b/components/metrics/single_sample_metrics.cc
@@ -0,0 +1,85 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/single_sample_metrics.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/metrics/single_sample_metrics.h"
+#include "base/threading/thread_checker.h"
+#include "components/metrics/single_sample_metrics_factory_impl.h"
+#include "mojo/public/cpp/bindings/strong_binding.h"
+
+namespace metrics {
+
+namespace {
+
+class MojoSingleSampleMetric : public mojom::SingleSampleMetric {
+ public:
+ MojoSingleSampleMetric(const std::string& histogram_name,
+ base::HistogramBase::Sample min,
+ base::HistogramBase::Sample max,
+ uint32_t bucket_count,
+ int32_t flags)
+ : metric_(histogram_name, min, max, bucket_count, flags) {}
+ ~MojoSingleSampleMetric() override {}
+
+ private:
+ // mojom::SingleSampleMetric:
+ void SetSample(base::HistogramBase::Sample sample) override {
+ metric_.SetSample(sample);
+ }
+
+ base::DefaultSingleSampleMetric metric_;
+
+ DISALLOW_COPY_AND_ASSIGN(MojoSingleSampleMetric);
+};
+
+class MojoSingleSampleMetricsProvider
+ : public mojom::SingleSampleMetricsProvider {
+ public:
+ MojoSingleSampleMetricsProvider() {}
+ ~MojoSingleSampleMetricsProvider() override {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ }
+
+ private:
+ // mojom::SingleSampleMetricsProvider:
+ void AcquireSingleSampleMetric(
+ const std::string& histogram_name,
+ base::HistogramBase::Sample min,
+ base::HistogramBase::Sample max,
+ uint32_t bucket_count,
+ int32_t flags,
+ mojom::SingleSampleMetricRequest request) override {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ mojo::MakeStrongBinding(std::make_unique<MojoSingleSampleMetric>(
+ histogram_name, min, max, bucket_count, flags),
+ std::move(request));
+ }
+
+  // Providers must be created, used, and destroyed on the same thread.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(MojoSingleSampleMetricsProvider);
+};
+
+} // namespace
+
+// static
+void InitializeSingleSampleMetricsFactory(CreateProviderCB create_provider_cb) {
+ base::SingleSampleMetricsFactory::SetFactory(
+ std::make_unique<SingleSampleMetricsFactoryImpl>(
+ std::move(create_provider_cb)));
+}
+
+// static
+void CreateSingleSampleMetricsProvider(
+ mojom::SingleSampleMetricsProviderRequest request) {
+ mojo::MakeStrongBinding(std::make_unique<MojoSingleSampleMetricsProvider>(),
+ std::move(request));
+}
+
+} // namespace metrics
diff --git a/components/metrics/single_sample_metrics.h b/components/metrics/single_sample_metrics.h
new file mode 100644
index 0000000..989f6a2
--- /dev/null
+++ b/components/metrics/single_sample_metrics.h
@@ -0,0 +1,41 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_SINGLE_SAMPLE_METRICS_H_
+#define COMPONENTS_METRICS_SINGLE_SAMPLE_METRICS_H_
+
+#include "base/callback.h"
+#include "components/metrics/public/interfaces/single_sample_metrics.mojom.h"
+
+namespace metrics {
+
+using CreateProviderCB =
+ base::RepeatingCallback<void(mojom::SingleSampleMetricsProviderRequest)>;
+
+// Initializes and sets the base::SingleSampleMetricsFactory for the current
+// process. |create_provider_cb| is used to create provider instances per each
+// thread that the factory is used on; this is necessary since the underlying
+// providers must only be used on the same thread as construction.
+//
+// We use a callback here to avoid taking an additional DEPS entry on content
+// and a service_manager::Connector(), and to avoid the need for the service
+// test harness in metrics unittests.
+//
+// Typically this is called in a process where termination may occur without
+// warning, e.g. a renderer process.
+extern void InitializeSingleSampleMetricsFactory(
+ CreateProviderCB create_provider_cb);
+
+// Creates a mojom::SingleSampleMetricsProvider capable of vending single sample
+// metrics attached to a mojo pipe.
+//
+// Typically this is given to a service_manager::BinderRegistry in the process
+// that has a deterministic shutdown path and which serves as a stable endpoint
+// for the factory created by the above initialize method in another process.
+extern void CreateSingleSampleMetricsProvider(
+ mojom::SingleSampleMetricsProviderRequest request);
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_SINGLE_SAMPLE_METRICS_H_
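
A hedged wiring sketch for the two entry points above (a sketch, not part of the diff). Binding the provider in the same process, as done here, is mainly a test convenience; a real embedder would forward the request to the process that hosts the provider. Function names are illustrative.

```cpp
#include <utility>

#include "base/bind.h"
#include "components/metrics/single_sample_metrics.h"

// Illustrative forwarding target; a real embedder would route |request| to the
// process with a deterministic shutdown path.
void BindProviderInThisProcess(
    metrics::mojom::SingleSampleMetricsProviderRequest request) {
  metrics::CreateSingleSampleMetricsProvider(std::move(request));
}

void InstallSingleSampleMetricsFactory() {
  metrics::InitializeSingleSampleMetricsFactory(
      base::BindRepeating(&BindProviderInThisProcess));
}
```
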
diff --git a/components/metrics/single_sample_metrics_factory_impl.cc b/components/metrics/single_sample_metrics_factory_impl.cc
new file mode 100644
index 0000000..4ab48f8
--- /dev/null
+++ b/components/metrics/single_sample_metrics_factory_impl.cc
@@ -0,0 +1,92 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/single_sample_metrics_factory_impl.h"
+
+#include <memory>
+
+#include "base/threading/thread_checker.h"
+
+namespace metrics {
+
+namespace {
+
+class SingleSampleMetricImpl : public base::SingleSampleMetric {
+ public:
+ SingleSampleMetricImpl(mojom::SingleSampleMetricPtr metric)
+ : metric_(std::move(metric)) {}
+
+ ~SingleSampleMetricImpl() override {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ }
+
+ void SetSample(base::HistogramBase::Sample sample) override {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ metric_->SetSample(sample);
+ }
+
+ private:
+ base::ThreadChecker thread_checker_;
+ mojom::SingleSampleMetricPtr metric_;
+
+ DISALLOW_COPY_AND_ASSIGN(SingleSampleMetricImpl);
+};
+
+} // namespace
+
+SingleSampleMetricsFactoryImpl::SingleSampleMetricsFactoryImpl(
+ CreateProviderCB create_provider_cb)
+ : create_provider_cb_(std::move(create_provider_cb)) {}
+
+SingleSampleMetricsFactoryImpl::~SingleSampleMetricsFactoryImpl() {}
+
+std::unique_ptr<base::SingleSampleMetric>
+SingleSampleMetricsFactoryImpl::CreateCustomCountsMetric(
+ const std::string& histogram_name,
+ base::HistogramBase::Sample min,
+ base::HistogramBase::Sample max,
+ uint32_t bucket_count) {
+ return CreateMetric(histogram_name, min, max, bucket_count,
+ base::HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+void SingleSampleMetricsFactoryImpl::DestroyProviderForTesting() {
+ if (auto* provider = provider_tls_.Get())
+ delete provider;
+ provider_tls_.Set(nullptr);
+}
+
+std::unique_ptr<base::SingleSampleMetric>
+SingleSampleMetricsFactoryImpl::CreateMetric(const std::string& histogram_name,
+ base::HistogramBase::Sample min,
+ base::HistogramBase::Sample max,
+ uint32_t bucket_count,
+ int32_t flags) {
+ mojom::SingleSampleMetricPtr metric;
+ GetProvider()->AcquireSingleSampleMetric(histogram_name, min, max,
+ bucket_count, flags,
+ mojo::MakeRequest(&metric));
+ return std::make_unique<SingleSampleMetricImpl>(std::move(metric));
+}
+
+mojom::SingleSampleMetricsProvider*
+SingleSampleMetricsFactoryImpl::GetProvider() {
+ // Check the current TLS slot to see if we have created a provider already for
+ // this thread.
+ if (auto* provider = provider_tls_.Get())
+ return provider->get();
+
+ // If not, create a new one which will persist until process shutdown and put
+ // it in the TLS slot for the current thread.
+ mojom::SingleSampleMetricsProviderPtr* provider =
+ new mojom::SingleSampleMetricsProviderPtr();
+ provider_tls_.Set(provider);
+
+ // Start the provider connection and return it; it won't be fully connected
+ // until later, but mojo will buffer all calls prior to completion.
+ create_provider_cb_.Run(mojo::MakeRequest(provider));
+ return provider->get();
+}
+
+} // namespace metrics
diff --git a/components/metrics/single_sample_metrics_factory_impl.h b/components/metrics/single_sample_metrics_factory_impl.h
new file mode 100644
index 0000000..297324f
--- /dev/null
+++ b/components/metrics/single_sample_metrics_factory_impl.h
@@ -0,0 +1,71 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_SINGLE_VALUE_HISTOGRAM_FACTORY_IMPL_H_
+#define COMPONENTS_METRICS_SINGLE_VALUE_HISTOGRAM_FACTORY_IMPL_H_
+
+#include <string>
+
+#include "base/metrics/single_sample_metrics.h"
+#include "base/threading/thread_local.h"
+#include "components/metrics/public/interfaces/single_sample_metrics.mojom.h"
+#include "components/metrics/single_sample_metrics.h"
+
+namespace metrics {
+
+// SingleSampleMetricsFactory implementation for creating SingleSampleMetric
+// instances that communicate over mojo to instances in another process.
+//
+// Persistence outside of the current process allows these metrics to record a
+// sample even in the event of sudden process termination. As an example, this
+// is useful for garbage-collected objects which may never get a chance to run
+// their destructors during a fast shutdown (process kill).
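+//
+// Example (illustrative, mirroring the unit tests; the histogram name and
+// parameters below are made up): once installed as the process-wide factory,
+// callers go through the base API only:
+//
+//   std::unique_ptr<base::SingleSampleMetric> metric =
+//       base::SingleSampleMetricsFactory::Get()->CreateCustomCountsMetric(
+//           "My.Histogram", /*min=*/1, /*max=*/100, /*bucket_count=*/50);
+//   metric->SetSample(42);  // Only the last sample set before the metric is
+//                           // destroyed (or the process dies) is recorded.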
+class SingleSampleMetricsFactoryImpl : public base::SingleSampleMetricsFactory {
+ public:
+ // Constructs a factory capable of vending single sample metrics from any
+ // thread. |create_provider_cb| will be called from arbitrary threads to
+ // create providers as necessary; the callback must handle thread safety.
+ //
+ // We use a callback here to avoid taking additional DEPS on content and a
+  // service_manager::Connector() for simplicity and to avoid the need for
+ // using the service test harness just for instantiating this class.
+ explicit SingleSampleMetricsFactoryImpl(CreateProviderCB create_provider_cb);
+ ~SingleSampleMetricsFactoryImpl() override;
+
+ // base::SingleSampleMetricsFactory:
+ std::unique_ptr<base::SingleSampleMetric> CreateCustomCountsMetric(
+ const std::string& histogram_name,
+ base::HistogramBase::Sample min,
+ base::HistogramBase::Sample max,
+ uint32_t bucket_count) override;
+
+ // Providers live forever in production, but tests should be kind and clean up
+ // after themselves to avoid tests trampling on one another. Destroys the
+ // provider in the TLS slot for the calling thread.
+ void DestroyProviderForTesting();
+
+ private:
+ // Creates a single sample metric.
+ std::unique_ptr<base::SingleSampleMetric> CreateMetric(
+ const std::string& histogram_name,
+ base::HistogramBase::Sample min,
+ base::HistogramBase::Sample max,
+ uint32_t bucket_count,
+ int32_t flags);
+
+ // Gets the SingleSampleMetricsProvider for the current thread. If none
+ // exists, then a new instance is created and set in the TLS slot.
+ mojom::SingleSampleMetricsProvider* GetProvider();
+
+ CreateProviderCB create_provider_cb_;
+
+ // Per thread storage slot for the mojo provider.
+ base::ThreadLocalPointer<mojom::SingleSampleMetricsProviderPtr> provider_tls_;
+
+ DISALLOW_COPY_AND_ASSIGN(SingleSampleMetricsFactoryImpl);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_SINGLE_VALUE_HISTOGRAM_FACTORY_IMPL_H_
diff --git a/components/metrics/single_sample_metrics_factory_impl_unittest.cc b/components/metrics/single_sample_metrics_factory_impl_unittest.cc
new file mode 100644
index 0000000..7362085
--- /dev/null
+++ b/components/metrics/single_sample_metrics_factory_impl_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/single_sample_metrics_factory_impl.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/dummy_histogram.h"
+#include "base/run_loop.h"
+#include "base/test/gtest_util.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "base/threading/thread.h"
+#include "components/metrics/single_sample_metrics.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+namespace {
+
+const base::HistogramBase::Sample kMin = 1;
+const base::HistogramBase::Sample kMax = 10;
+const uint32_t kBucketCount = 10;
+const char kMetricName[] = "Single.Sample.Metric";
+
+class SingleSampleMetricsFactoryImplTest : public testing::Test {
+ public:
+ SingleSampleMetricsFactoryImplTest() : thread_("TestThread") {
+ InitializeSingleSampleMetricsFactory(
+ base::BindRepeating(&SingleSampleMetricsFactoryImplTest::CreateProvider,
+ base::Unretained(this)));
+ factory_ = static_cast<SingleSampleMetricsFactoryImpl*>(
+ base::SingleSampleMetricsFactory::Get());
+ }
+
+ ~SingleSampleMetricsFactoryImplTest() override {
+ factory_->DestroyProviderForTesting();
+ if (thread_.IsRunning())
+ ShutdownThread();
+ base::SingleSampleMetricsFactory::DeleteFactoryForTesting();
+ }
+
+ protected:
+ void StartThread() { ASSERT_TRUE(thread_.Start()); }
+
+ void ShutdownThread() {
+ thread_.task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ &SingleSampleMetricsFactoryImpl::DestroyProviderForTesting,
+ base::Unretained(factory_)));
+ thread_.Stop();
+ }
+
+ void CreateProvider(mojom::SingleSampleMetricsProviderRequest request) {
+ CreateSingleSampleMetricsProvider(std::move(request));
+ provider_count_++;
+ }
+
+ std::unique_ptr<base::SingleSampleMetric> CreateMetricOnThread() {
+ std::unique_ptr<base::SingleSampleMetric> metric;
+ base::RunLoop run_loop;
+ thread_.task_runner()->PostTaskAndReply(
+ FROM_HERE,
+ base::BindOnce(
+ &SingleSampleMetricsFactoryImplTest::CreateAndStoreMetric,
+ base::Unretained(this), &metric),
+ run_loop.QuitClosure());
+ run_loop.Run();
+ return metric;
+ }
+
+ void CreateAndStoreMetric(std::unique_ptr<base::SingleSampleMetric>* metric) {
+ *metric = factory_->CreateCustomCountsMetric(kMetricName, kMin, kMax,
+ kBucketCount);
+ }
+
+  base::MessageLoop message_loop_;
+ SingleSampleMetricsFactoryImpl* factory_;
+ base::Thread thread_;
+ size_t provider_count_ = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SingleSampleMetricsFactoryImplTest);
+};
+
+} // namespace
+
+TEST_F(SingleSampleMetricsFactoryImplTest, SingleProvider) {
+ std::unique_ptr<base::SingleSampleMetric> metric1 =
+ factory_->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+
+ std::unique_ptr<base::SingleSampleMetric> metric2 =
+ factory_->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+
+ // Verify that only a single provider is created for multiple metrics.
+ base::RunLoop().RunUntilIdle();
+ EXPECT_EQ(1u, provider_count_);
+}
+
+TEST_F(SingleSampleMetricsFactoryImplTest, DoesNothing) {
+ base::HistogramTester tester;
+
+ std::unique_ptr<base::SingleSampleMetric> metric =
+ factory_->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+ metric.reset();
+
+ // Verify that no sample is recorded if SetSample() is never called.
+ base::RunLoop().RunUntilIdle();
+ tester.ExpectTotalCount(kMetricName, 0);
+}
+
+TEST_F(SingleSampleMetricsFactoryImplTest, DefaultSingleSampleMetricWithValue) {
+ base::HistogramTester tester;
+ std::unique_ptr<base::SingleSampleMetric> metric =
+ factory_->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+
+ const base::HistogramBase::Sample kLastSample = 9;
+ metric->SetSample(1);
+ metric->SetSample(3);
+ metric->SetSample(5);
+ metric->SetSample(kLastSample);
+ metric.reset();
+
+ // Verify only the last sample sent to SetSample() is recorded.
+ base::RunLoop().RunUntilIdle();
+ tester.ExpectUniqueSample(kMetricName, kLastSample, 1);
+
+  // Verify construction implicitly by requesting a histogram with the same
+  // parameters; this test relies on the fact that histogram objects are unique
+  // per name. Requesting different parameters returns a DummyHistogram instead.
+ EXPECT_EQ(base::DummyHistogram::GetInstance(),
+ base::Histogram::FactoryGet(kMetricName, 1, 3, 3,
+ base::HistogramBase::kNoFlags));
+ EXPECT_NE(base::DummyHistogram::GetInstance(),
+ base::Histogram::FactoryGet(
+ kMetricName, kMin, kMax, kBucketCount,
+ base::HistogramBase::kUmaTargetedHistogramFlag));
+}
+
+TEST_F(SingleSampleMetricsFactoryImplTest, MultithreadedMetrics) {
+ base::HistogramTester tester;
+ std::unique_ptr<base::SingleSampleMetric> metric =
+ factory_->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+ EXPECT_EQ(1u, provider_count_);
+
+ StartThread();
+
+ std::unique_ptr<base::SingleSampleMetric> threaded_metric =
+ CreateMetricOnThread();
+ ASSERT_TRUE(threaded_metric);
+
+ // A second provider should be created to handle requests on our new thread.
+ EXPECT_EQ(2u, provider_count_);
+
+ // Calls from the wrong thread should DCHECK.
+ EXPECT_DCHECK_DEATH(threaded_metric->SetSample(5));
+ EXPECT_DCHECK_DEATH(threaded_metric.reset());
+
+ // Test that samples are set on each thread correctly.
+ const base::HistogramBase::Sample kSample = 7;
+
+ {
+ metric->SetSample(kSample);
+
+ base::RunLoop run_loop;
+ thread_.task_runner()->PostTaskAndReply(
+ FROM_HERE,
+ base::BindOnce(&base::SingleSampleMetric::SetSample,
+ base::Unretained(threaded_metric.get()), kSample),
+ run_loop.QuitClosure());
+ run_loop.Run();
+ }
+
+ // Release metrics and shutdown thread to ensure destruction completes.
+ thread_.task_runner()->DeleteSoon(FROM_HERE, threaded_metric.release());
+ ShutdownThread();
+
+ metric.reset();
+ base::RunLoop().RunUntilIdle();
+
+ tester.ExpectUniqueSample(kMetricName, kSample, 2);
+}
+
+} // namespace metrics
diff --git a/components/metrics/stability_metrics_helper.cc b/components/metrics/stability_metrics_helper.cc
new file mode 100644
index 0000000..517fc71
--- /dev/null
+++ b/components/metrics/stability_metrics_helper.cc
@@ -0,0 +1,322 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/stability_metrics_helper.h"
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/user_metrics.h"
+#include "build/build_config.h"
+#include "build/buildflag.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+#include "components/variations/hashing.h"
+#include "extensions/buildflags/buildflags.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+#if defined(OS_WIN)
+#include <windows.h> // Needed for STATUS_* codes
+#endif
+
+#if defined(OS_CHROMEOS)
+#include "components/metrics/system_memory_stats_recorder.h"
+#endif
+
+namespace metrics {
+
+namespace {
+
+enum RendererType {
+ RENDERER_TYPE_RENDERER = 1,
+ RENDERER_TYPE_EXTENSION,
+  // NOTE: Add new renderer types only immediately above this line. Also,
+  // make sure the enum list in tools/metrics/histograms/histograms.xml is
+  // updated with any change made here.
+ RENDERER_TYPE_COUNT
+};
+
+// Converts an exit code into something that can be inserted into our
+// histograms (which expect non-negative numbers less than MAX_INT).
+int MapCrashExitCodeForHistogram(int exit_code) {
+#if defined(OS_WIN)
+ // Since |abs(STATUS_GUARD_PAGE_VIOLATION) == MAX_INT| it causes problems in
+ // histograms.cc. Solve this by remapping it to a smaller value, which
+ // hopefully doesn't conflict with other codes.
+ if (static_cast<DWORD>(exit_code) == STATUS_GUARD_PAGE_VIOLATION)
+ return 0x1FCF7EC3; // Randomly picked number.
+#endif
+
+ return std::abs(exit_code);
+}
+
+void RecordChildKills(RendererType histogram_type) {
+ UMA_HISTOGRAM_ENUMERATION("BrowserRenderProcessHost.ChildKills",
+ histogram_type, RENDERER_TYPE_COUNT);
+}
+
+// Macro for logging the age of a crashed process.
+//
+// Notes:
+// - IMPORTANT: When changing the constants below, please change the names of
+// the histograms logged via UMA_HISTOGRAM_CRASHED_PROCESS_AGE.
+// - 99th percentile of Memory.Experimental.Renderer.Uptime hovers around 17h.
+//   - |kCrashedProcessAgeMin| is as low as possible, so that crashes occurring
+//     during early startup (e.g. crashes that end up with
+//     STATUS_DLL_INIT_FAILED or STATUS_DLL_NOT_FOUND) can be categorized with
+//     high confidence.
+// - Note that even with just 50 buckets, we still get narrow and accurate
+// buckets at the lower end: 0ms, 1ms, 2ms, 3ms, 4-5ms, 6-8ms, 9-12ms, ...
+constexpr auto kCrashedProcessAgeMin = base::TimeDelta::FromMilliseconds(1);
+constexpr auto kCrashedProcessAgeMax = base::TimeDelta::FromHours(48);
+constexpr uint32_t kCrashedProcessAgeCount = 50;
+#define UMA_HISTOGRAM_CRASHED_PROCESS_AGE(histogram_name, uptime) \
+ UMA_HISTOGRAM_CUSTOM_TIMES(histogram_name, uptime, kCrashedProcessAgeMin, \
+ kCrashedProcessAgeMax, kCrashedProcessAgeCount)
+
+} // namespace
+
+StabilityMetricsHelper::StabilityMetricsHelper(PrefService* local_state)
+ : local_state_(local_state) {
+ DCHECK(local_state_);
+}
+
+StabilityMetricsHelper::~StabilityMetricsHelper() {}
+
+void StabilityMetricsHelper::ProvideStabilityMetrics(
+ SystemProfileProto* system_profile_proto) {
+ SystemProfileProto_Stability* stability_proto =
+ system_profile_proto->mutable_stability();
+
+ int count = local_state_->GetInteger(prefs::kStabilityPageLoadCount);
+ if (count) {
+ stability_proto->set_page_load_count(count);
+ local_state_->SetInteger(prefs::kStabilityPageLoadCount, 0);
+ }
+
+ count = local_state_->GetInteger(prefs::kStabilityChildProcessCrashCount);
+ if (count) {
+ stability_proto->set_child_process_crash_count(count);
+ local_state_->SetInteger(prefs::kStabilityChildProcessCrashCount, 0);
+ }
+
+ count = local_state_->GetInteger(prefs::kStabilityRendererCrashCount);
+ if (count) {
+ stability_proto->set_renderer_crash_count(count);
+ local_state_->SetInteger(prefs::kStabilityRendererCrashCount, 0);
+ }
+
+ count = local_state_->GetInteger(prefs::kStabilityRendererFailedLaunchCount);
+ if (count) {
+ stability_proto->set_renderer_failed_launch_count(count);
+ local_state_->SetInteger(prefs::kStabilityRendererFailedLaunchCount, 0);
+ }
+
+ count = local_state_->GetInteger(prefs::kStabilityRendererLaunchCount);
+ if (count) {
+ stability_proto->set_renderer_launch_count(count);
+ local_state_->SetInteger(prefs::kStabilityRendererLaunchCount, 0);
+ }
+
+ count =
+ local_state_->GetInteger(prefs::kStabilityExtensionRendererCrashCount);
+ if (count) {
+ stability_proto->set_extension_renderer_crash_count(count);
+ local_state_->SetInteger(prefs::kStabilityExtensionRendererCrashCount, 0);
+ }
+
+ count = local_state_->GetInteger(
+ prefs::kStabilityExtensionRendererFailedLaunchCount);
+ if (count) {
+ stability_proto->set_extension_renderer_failed_launch_count(count);
+ local_state_->SetInteger(
+ prefs::kStabilityExtensionRendererFailedLaunchCount, 0);
+ }
+
+ count = local_state_->GetInteger(prefs::kStabilityRendererHangCount);
+ if (count) {
+ stability_proto->set_renderer_hang_count(count);
+ local_state_->SetInteger(prefs::kStabilityRendererHangCount, 0);
+ }
+
+ count =
+ local_state_->GetInteger(prefs::kStabilityExtensionRendererLaunchCount);
+ if (count) {
+ stability_proto->set_extension_renderer_launch_count(count);
+ local_state_->SetInteger(prefs::kStabilityExtensionRendererLaunchCount, 0);
+ }
+}
+
+void StabilityMetricsHelper::ClearSavedStabilityMetrics() {
+  // Clear all the prefs this class reports via UMA (this doesn't include
+  // |kUninstallMetricsPageLoadCount|, as it's not sent up by UMA).
+ local_state_->SetInteger(prefs::kStabilityChildProcessCrashCount, 0);
+ local_state_->SetInteger(prefs::kStabilityExtensionRendererCrashCount, 0);
+ local_state_->SetInteger(prefs::kStabilityExtensionRendererFailedLaunchCount,
+ 0);
+ local_state_->SetInteger(prefs::kStabilityExtensionRendererLaunchCount, 0);
+ local_state_->SetInteger(prefs::kStabilityPageLoadCount, 0);
+ local_state_->SetInteger(prefs::kStabilityRendererCrashCount, 0);
+ local_state_->SetInteger(prefs::kStabilityRendererFailedLaunchCount, 0);
+ local_state_->SetInteger(prefs::kStabilityRendererHangCount, 0);
+ local_state_->SetInteger(prefs::kStabilityRendererLaunchCount, 0);
+}
+
+// static
+void StabilityMetricsHelper::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterIntegerPref(prefs::kStabilityChildProcessCrashCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityExtensionRendererCrashCount,
+ 0);
+ registry->RegisterIntegerPref(
+ prefs::kStabilityExtensionRendererFailedLaunchCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityExtensionRendererLaunchCount,
+ 0);
+ registry->RegisterIntegerPref(prefs::kStabilityPageLoadCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityRendererCrashCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityRendererFailedLaunchCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityRendererHangCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityRendererLaunchCount, 0);
+
+ registry->RegisterInt64Pref(prefs::kUninstallMetricsPageLoadCount, 0);
+}
+
+void StabilityMetricsHelper::IncreaseRendererCrashCount() {
+ IncrementPrefValue(prefs::kStabilityRendererCrashCount);
+}
+
+void StabilityMetricsHelper::BrowserUtilityProcessLaunched(
+ const std::string& metrics_name) {
+ uint32_t hash = variations::HashName(metrics_name);
+ base::UmaHistogramSparse("ChildProcess.Launched.UtilityProcessHash", hash);
+}
+
+void StabilityMetricsHelper::BrowserUtilityProcessCrashed(
+ const std::string& metrics_name,
+ int exit_code) {
+  // TODO(wfh): There doesn't appear to be a good way to log these exit codes
+  // without adding something to the stability proto, so for now only log the
+  // crash itself; if the numbers are high enough, logging exit codes can be
+  // added later.
+ uint32_t hash = variations::HashName(metrics_name);
+ base::UmaHistogramSparse("ChildProcess.Crashed.UtilityProcessHash", hash);
+}
+
+void StabilityMetricsHelper::BrowserChildProcessCrashed() {
+ IncrementPrefValue(prefs::kStabilityChildProcessCrashCount);
+}
+
+void StabilityMetricsHelper::LogLoadStarted(bool is_incognito) {
+ base::RecordAction(base::UserMetricsAction("PageLoad"));
+ if (is_incognito)
+ base::RecordAction(base::UserMetricsAction("PageLoadInIncognito"));
+ IncrementPrefValue(prefs::kStabilityPageLoadCount);
+ IncrementLongPrefsValue(prefs::kUninstallMetricsPageLoadCount);
+ // We need to save the prefs, as page load count is a critical stat, and it
+ // might be lost due to a crash :-(.
+}
+
+void StabilityMetricsHelper::LogRendererCrash(
+ bool was_extension_process,
+ base::TerminationStatus status,
+ int exit_code,
+ base::Optional<base::TimeDelta> uptime) {
+ RendererType histogram_type =
+ was_extension_process ? RENDERER_TYPE_EXTENSION : RENDERER_TYPE_RENDERER;
+
+ switch (status) {
+ case base::TERMINATION_STATUS_NORMAL_TERMINATION:
+ break;
+ case base::TERMINATION_STATUS_PROCESS_CRASHED:
+ case base::TERMINATION_STATUS_ABNORMAL_TERMINATION:
+ case base::TERMINATION_STATUS_OOM:
+ if (was_extension_process) {
+#if !BUILDFLAG(ENABLE_EXTENSIONS)
+ NOTREACHED();
+#endif
+ IncrementPrefValue(prefs::kStabilityExtensionRendererCrashCount);
+
+ base::UmaHistogramSparse("CrashExitCodes.Extension",
+ MapCrashExitCodeForHistogram(exit_code));
+ if (uptime.has_value()) {
+ UMA_HISTOGRAM_CRASHED_PROCESS_AGE(
+ "Stability.CrashedProcessAge.Extension", uptime.value());
+ }
+ } else {
+ IncrementPrefValue(prefs::kStabilityRendererCrashCount);
+
+ base::UmaHistogramSparse("CrashExitCodes.Renderer",
+ MapCrashExitCodeForHistogram(exit_code));
+ if (uptime.has_value()) {
+ UMA_HISTOGRAM_CRASHED_PROCESS_AGE(
+ "Stability.CrashedProcessAge.Renderer", uptime.value());
+ }
+ }
+
+ UMA_HISTOGRAM_ENUMERATION("BrowserRenderProcessHost.ChildCrashes",
+ histogram_type, RENDERER_TYPE_COUNT);
+ break;
+ case base::TERMINATION_STATUS_PROCESS_WAS_KILLED:
+ RecordChildKills(histogram_type);
+ break;
+#if defined(OS_ANDROID)
+ case base::TERMINATION_STATUS_OOM_PROTECTED:
+ // TODO(wfh): Check if this should be a Kill or a Crash on Android.
+ break;
+#endif
+#if defined(OS_CHROMEOS)
+ case base::TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM:
+ RecordChildKills(histogram_type);
+ UMA_HISTOGRAM_ENUMERATION("BrowserRenderProcessHost.ChildKills.OOM",
+ was_extension_process ? 2 : 1, 3);
+ RecordMemoryStats(was_extension_process
+ ? RECORD_MEMORY_STATS_EXTENSIONS_OOM_KILLED
+ : RECORD_MEMORY_STATS_CONTENTS_OOM_KILLED);
+ break;
+#endif
+ case base::TERMINATION_STATUS_STILL_RUNNING:
+ UMA_HISTOGRAM_ENUMERATION("BrowserRenderProcessHost.DisconnectedAlive",
+ histogram_type, RENDERER_TYPE_COUNT);
+ break;
+ case base::TERMINATION_STATUS_LAUNCH_FAILED:
+ UMA_HISTOGRAM_ENUMERATION("BrowserRenderProcessHost.ChildLaunchFailures",
+ histogram_type, RENDERER_TYPE_COUNT);
+ base::UmaHistogramSparse(
+ "BrowserRenderProcessHost.ChildLaunchFailureCodes", exit_code);
+ if (was_extension_process)
+ IncrementPrefValue(prefs::kStabilityExtensionRendererFailedLaunchCount);
+ else
+ IncrementPrefValue(prefs::kStabilityRendererFailedLaunchCount);
+ break;
+ case base::TERMINATION_STATUS_MAX_ENUM:
+ NOTREACHED();
+ break;
+ }
+}
+
+void StabilityMetricsHelper::LogRendererLaunched(bool was_extension_process) {
+ if (was_extension_process)
+ IncrementPrefValue(prefs::kStabilityExtensionRendererLaunchCount);
+ else
+ IncrementPrefValue(prefs::kStabilityRendererLaunchCount);
+}
+
+void StabilityMetricsHelper::IncrementPrefValue(const char* path) {
+ int value = local_state_->GetInteger(path);
+ local_state_->SetInteger(path, value + 1);
+}
+
+void StabilityMetricsHelper::IncrementLongPrefsValue(const char* path) {
+ int64_t value = local_state_->GetInt64(path);
+ local_state_->SetInt64(path, value + 1);
+}
+
+void StabilityMetricsHelper::LogRendererHang() {
+ IncrementPrefValue(prefs::kStabilityRendererHangCount);
+}
+
+} // namespace metrics
diff --git a/components/metrics/stability_metrics_helper.h b/components/metrics/stability_metrics_helper.h
new file mode 100644
index 0000000..5a22882
--- /dev/null
+++ b/components/metrics/stability_metrics_helper.h
@@ -0,0 +1,78 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_STABILITY_METRICS_HELPER_H_
+#define COMPONENTS_METRICS_STABILITY_METRICS_HELPER_H_
+
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/process/kill.h"
+#include "base/time/time.h"
+
+class PrefRegistrySimple;
+class PrefService;
+
+namespace metrics {
+
+class SystemProfileProto;
+
+// StabilityMetricsHelper is a class that provides functionality common to
+// different embedders' stability metrics providers.
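+//
+// Typical usage, sketched from the unit tests for this class (the |registry|,
+// |local_state|, and |system_profile| objects are supplied by the embedder):
+//
+//   StabilityMetricsHelper::RegisterPrefs(registry);  // at pref registration
+//   StabilityMetricsHelper helper(local_state);
+//   helper.LogRendererLaunched(/*was_extension_process=*/false);
+//   ...
+//   helper.ProvideStabilityMetrics(&system_profile);  // when building a log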
+class StabilityMetricsHelper {
+ public:
+ explicit StabilityMetricsHelper(PrefService* local_state);
+ ~StabilityMetricsHelper();
+
+ // Provides stability metrics.
+ void ProvideStabilityMetrics(SystemProfileProto* system_profile_proto);
+
+ // Clears the gathered stability metrics.
+ void ClearSavedStabilityMetrics();
+
+ // Records a utility process launch with name |metrics_name|.
+ void BrowserUtilityProcessLaunched(const std::string& metrics_name);
+
+ // Records a utility process crash with name |metrics_name|.
+ void BrowserUtilityProcessCrashed(const std::string& metrics_name,
+ int exit_code);
+
+ // Records a browser child process crash.
+ void BrowserChildProcessCrashed();
+
+ // Logs the initiation of a page load.
+ void LogLoadStarted(bool is_incognito);
+
+ // Records a renderer process crash.
+ void LogRendererCrash(bool was_extension_process,
+ base::TerminationStatus status,
+ int exit_code,
+ base::Optional<base::TimeDelta> uptime);
+
+ // Records that a new renderer process was successfully launched.
+ void LogRendererLaunched(bool was_extension_process);
+
+ // Records a renderer process hang.
+ void LogRendererHang();
+
+ // Registers local state prefs used by this class.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ // Increments the RendererCrash pref.
+ void IncreaseRendererCrashCount();
+
+ private:
+ // Increments an Integer pref value specified by |path|.
+ void IncrementPrefValue(const char* path);
+
+ // Increments a 64-bit Integer pref value specified by |path|.
+ void IncrementLongPrefsValue(const char* path);
+
+ PrefService* local_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(StabilityMetricsHelper);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_STABILITY_METRICS_HELPER_H_
diff --git a/components/metrics/stability_metrics_helper_unittest.cc b/components/metrics/stability_metrics_helper_unittest.cc
new file mode 100644
index 0000000..66fc041
--- /dev/null
+++ b/components/metrics/stability_metrics_helper_unittest.cc
@@ -0,0 +1,159 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/stability_metrics_helper.h"
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "build/build_config.h"
+#include "components/prefs/pref_service.h"
+#include "components/prefs/scoped_user_pref_update.h"
+#include "components/prefs/testing_pref_service.h"
+#include "extensions/buildflags/buildflags.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace metrics {
+
+namespace {
+
+enum RendererType {
+ RENDERER_TYPE_RENDERER = 1,
+ RENDERER_TYPE_EXTENSION,
+  // NOTE: Add new renderer types only immediately above this line. Also,
+  // make sure the enum list in tools/metrics/histograms/histograms.xml is
+  // updated with any change made here.
+ RENDERER_TYPE_COUNT
+};
+
+class StabilityMetricsHelperTest : public testing::Test {
+ protected:
+ StabilityMetricsHelperTest() : prefs_(new TestingPrefServiceSimple) {
+ StabilityMetricsHelper::RegisterPrefs(prefs()->registry());
+ }
+
+ TestingPrefServiceSimple* prefs() { return prefs_.get(); }
+
+ private:
+ std::unique_ptr<TestingPrefServiceSimple> prefs_;
+
+ DISALLOW_COPY_AND_ASSIGN(StabilityMetricsHelperTest);
+};
+
+} // namespace
+
+TEST_F(StabilityMetricsHelperTest, BrowserChildProcessCrashed) {
+ StabilityMetricsHelper helper(prefs());
+
+ helper.BrowserChildProcessCrashed();
+ helper.BrowserChildProcessCrashed();
+
+ // Call ProvideStabilityMetrics to check that it will force pending tasks to
+ // be executed immediately.
+ metrics::SystemProfileProto system_profile;
+
+ helper.ProvideStabilityMetrics(&system_profile);
+
+ // Check current number of instances created.
+ const metrics::SystemProfileProto_Stability& stability =
+ system_profile.stability();
+
+ EXPECT_EQ(2, stability.child_process_crash_count());
+}
+
+TEST_F(StabilityMetricsHelperTest, LogRendererCrash) {
+ StabilityMetricsHelper helper(prefs());
+ base::HistogramTester histogram_tester;
+ const base::TimeDelta kUptime = base::TimeDelta::FromSeconds(123);
+
+ // Crash and abnormal termination should increment renderer crash count.
+ helper.LogRendererCrash(false, base::TERMINATION_STATUS_PROCESS_CRASHED, 1,
+ kUptime);
+
+ helper.LogRendererCrash(false, base::TERMINATION_STATUS_ABNORMAL_TERMINATION,
+ 1, kUptime);
+
+ // OOM should increment renderer crash count.
+ helper.LogRendererCrash(false, base::TERMINATION_STATUS_OOM, 1, kUptime);
+
+ // Kill does not increment renderer crash count.
+ helper.LogRendererCrash(false, base::TERMINATION_STATUS_PROCESS_WAS_KILLED, 1,
+ kUptime);
+
+ // Failed launch increments failed launch count.
+ helper.LogRendererCrash(false, base::TERMINATION_STATUS_LAUNCH_FAILED, 1,
+ kUptime);
+
+ metrics::SystemProfileProto system_profile;
+
+ // Call ProvideStabilityMetrics to check that it will force pending tasks to
+ // be executed immediately.
+ helper.ProvideStabilityMetrics(&system_profile);
+
+ EXPECT_EQ(3, system_profile.stability().renderer_crash_count());
+ EXPECT_EQ(1, system_profile.stability().renderer_failed_launch_count());
+ EXPECT_EQ(0, system_profile.stability().extension_renderer_crash_count());
+
+ histogram_tester.ExpectUniqueSample("CrashExitCodes.Renderer", 1, 3);
+ histogram_tester.ExpectBucketCount("BrowserRenderProcessHost.ChildCrashes",
+ RENDERER_TYPE_RENDERER, 3);
+
+ // One launch failure each.
+ histogram_tester.ExpectBucketCount(
+ "BrowserRenderProcessHost.ChildLaunchFailures", RENDERER_TYPE_RENDERER,
+ 1);
+
+ // TERMINATION_STATUS_PROCESS_WAS_KILLED for a renderer.
+ histogram_tester.ExpectBucketCount("BrowserRenderProcessHost.ChildKills",
+ RENDERER_TYPE_RENDERER, 1);
+ histogram_tester.ExpectBucketCount("BrowserRenderProcessHost.ChildKills",
+ RENDERER_TYPE_EXTENSION, 0);
+ histogram_tester.ExpectBucketCount(
+ "BrowserRenderProcessHost.ChildLaunchFailureCodes", 1, 1);
+ histogram_tester.ExpectUniqueSample("Stability.CrashedProcessAge.Renderer",
+ kUptime.InMilliseconds(), 3);
+}
+
+// Note: ENABLE_EXTENSIONS is set to false on Android.
+#if BUILDFLAG(ENABLE_EXTENSIONS)
+TEST_F(StabilityMetricsHelperTest, LogRendererCrashEnableExtensions) {
+ StabilityMetricsHelper helper(prefs());
+ base::HistogramTester histogram_tester;
+ const base::TimeDelta kUptime = base::TimeDelta::FromSeconds(123);
+
+ // Crash and abnormal termination should increment extension crash count.
+ helper.LogRendererCrash(true, base::TERMINATION_STATUS_PROCESS_CRASHED, 1,
+ kUptime);
+
+ // OOM should increment extension renderer crash count.
+ helper.LogRendererCrash(true, base::TERMINATION_STATUS_OOM, 1, kUptime);
+
+ // Failed launch increments extension failed launch count.
+ helper.LogRendererCrash(true, base::TERMINATION_STATUS_LAUNCH_FAILED, 1,
+ kUptime);
+
+ metrics::SystemProfileProto system_profile;
+ helper.ProvideStabilityMetrics(&system_profile);
+
+ EXPECT_EQ(0, system_profile.stability().renderer_crash_count());
+ EXPECT_EQ(2, system_profile.stability().extension_renderer_crash_count());
+ EXPECT_EQ(
+ 1, system_profile.stability().extension_renderer_failed_launch_count());
+
+ histogram_tester.ExpectBucketCount(
+ "BrowserRenderProcessHost.ChildLaunchFailureCodes", 1, 1);
+ histogram_tester.ExpectUniqueSample("CrashExitCodes.Extension", 1, 2);
+ histogram_tester.ExpectBucketCount("BrowserRenderProcessHost.ChildCrashes",
+ RENDERER_TYPE_EXTENSION, 2);
+ histogram_tester.ExpectBucketCount(
+ "BrowserRenderProcessHost.ChildLaunchFailures", RENDERER_TYPE_EXTENSION,
+ 1);
+ histogram_tester.ExpectUniqueSample("Stability.CrashedProcessAge.Extension",
+ kUptime.InMilliseconds(), 2);
+}
+#endif
+
+} // namespace metrics
diff --git a/components/metrics/stability_metrics_provider.cc b/components/metrics/stability_metrics_provider.cc
new file mode 100644
index 0000000..7d411d3
--- /dev/null
+++ b/components/metrics/stability_metrics_provider.cc
@@ -0,0 +1,262 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/stability_metrics_provider.h"
+
+#include "base/metrics/histogram_macros.h"
+#include "build/build_config.h"
+#include "components/metrics/metrics_pref_names.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/build_info.h"
+#endif
+#if defined(OS_WIN)
+#include "components/metrics/system_session_analyzer_win.h"
+#endif
+
+namespace metrics {
+
+namespace {
+
+#if defined(OS_ANDROID)
+bool HasGmsCoreVersionChanged(PrefService* local_state) {
+ std::string previous_version =
+ local_state->GetString(prefs::kStabilityGmsCoreVersion);
+ std::string current_version =
+ base::android::BuildInfo::GetInstance()->gms_version_code();
+
+ // If the last version is empty, treat it as consistent.
+ if (previous_version.empty())
+ return false;
+
+ return previous_version != current_version;
+}
+
+void UpdateGmsCoreVersionPref(PrefService* local_state) {
+ std::string current_version =
+ base::android::BuildInfo::GetInstance()->gms_version_code();
+ local_state->SetString(prefs::kStabilityGmsCoreVersion, current_version);
+}
+#endif
+
+} // namespace
+
+StabilityMetricsProvider::StabilityMetricsProvider(PrefService* local_state)
+ : local_state_(local_state) {}
+
+StabilityMetricsProvider::~StabilityMetricsProvider() = default;
+
+// static
+void StabilityMetricsProvider::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterIntegerPref(prefs::kStabilityCrashCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityIncompleteSessionEndCount, 0);
+ registry->RegisterBooleanPref(prefs::kStabilitySessionEndCompleted, true);
+ registry->RegisterIntegerPref(prefs::kStabilityLaunchCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityBreakpadRegistrationFail, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityBreakpadRegistrationSuccess,
+ 0);
+ registry->RegisterIntegerPref(prefs::kStabilityDebuggerPresent, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityDebuggerNotPresent, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityDeferredCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityDiscardCount, 0);
+ registry->RegisterIntegerPref(prefs::kStabilityVersionMismatchCount, 0);
+#if defined(OS_ANDROID)
+ registry->RegisterStringPref(prefs::kStabilityGmsCoreVersion, "");
+ registry->RegisterIntegerPref(prefs::kStabilityCrashCountWithoutGmsCoreUpdate,
+ 0);
+#endif
+#if defined(OS_WIN)
+ registry->RegisterIntegerPref(prefs::kStabilitySystemCrashCount, 0);
+#endif
+}
+
+void StabilityMetricsProvider::Init() {
+#if defined(OS_ANDROID)
+  // This method has to be called after HasGmsCoreVersionChanged() to avoid
+  // overwriting its result.
+ UpdateGmsCoreVersionPref(local_state_);
+#endif
+}
+
+void StabilityMetricsProvider::ClearSavedStabilityMetrics() {
+ local_state_->SetInteger(prefs::kStabilityCrashCount, 0);
+ local_state_->SetInteger(prefs::kStabilityIncompleteSessionEndCount, 0);
+ local_state_->SetInteger(prefs::kStabilityBreakpadRegistrationSuccess, 0);
+ local_state_->SetInteger(prefs::kStabilityBreakpadRegistrationFail, 0);
+ local_state_->SetInteger(prefs::kStabilityDebuggerPresent, 0);
+ local_state_->SetInteger(prefs::kStabilityDebuggerNotPresent, 0);
+ local_state_->SetInteger(prefs::kStabilityLaunchCount, 0);
+ local_state_->SetBoolean(prefs::kStabilitySessionEndCompleted, true);
+ local_state_->SetInteger(prefs::kStabilityDeferredCount, 0);
+ // Note: kStabilityDiscardCount is not cleared as its intent is to measure
+ // the number of times data is discarded, even across versions.
+ local_state_->SetInteger(prefs::kStabilityVersionMismatchCount, 0);
+#if defined(OS_WIN)
+ local_state_->SetInteger(prefs::kStabilitySystemCrashCount, 0);
+#endif
+}
+
+void StabilityMetricsProvider::ProvideStabilityMetrics(
+ SystemProfileProto* system_profile) {
+ SystemProfileProto::Stability* stability =
+ system_profile->mutable_stability();
+
+ int pref_value = 0;
+
+ if (GetPrefValue(prefs::kStabilityLaunchCount, &pref_value))
+ stability->set_launch_count(pref_value);
+
+ if (GetPrefValue(prefs::kStabilityCrashCount, &pref_value))
+ stability->set_crash_count(pref_value);
+
+#if defined(OS_ANDROID)
+ if (GetPrefValue(prefs::kStabilityCrashCountWithoutGmsCoreUpdate,
+ &pref_value)) {
+ stability->set_crash_count_without_gms_core_update(pref_value);
+ }
+#endif
+
+ if (GetPrefValue(prefs::kStabilityIncompleteSessionEndCount, &pref_value))
+ stability->set_incomplete_shutdown_count(pref_value);
+
+ if (GetPrefValue(prefs::kStabilityBreakpadRegistrationSuccess, &pref_value))
+ stability->set_breakpad_registration_success_count(pref_value);
+
+ if (GetPrefValue(prefs::kStabilityBreakpadRegistrationFail, &pref_value))
+ stability->set_breakpad_registration_failure_count(pref_value);
+
+ if (GetPrefValue(prefs::kStabilityDebuggerPresent, &pref_value))
+ stability->set_debugger_present_count(pref_value);
+
+ if (GetPrefValue(prefs::kStabilityDebuggerNotPresent, &pref_value))
+ stability->set_debugger_not_present_count(pref_value);
+
+ // Note: only logging the following histograms for non-zero values.
+ if (GetPrefValue(prefs::kStabilityDeferredCount, &pref_value)) {
+ UMA_STABILITY_HISTOGRAM_COUNTS_100(
+ "Stability.Internals.InitialStabilityLogDeferredCount", pref_value);
+ }
+
+ // Note: only logging the following histograms for non-zero values.
+ if (GetPrefValue(prefs::kStabilityDiscardCount, &pref_value)) {
+ UMA_STABILITY_HISTOGRAM_COUNTS_100("Stability.Internals.DataDiscardCount",
+ pref_value);
+ }
+
+ // Note: only logging the following histograms for non-zero values.
+ if (GetPrefValue(prefs::kStabilityVersionMismatchCount, &pref_value)) {
+ UMA_STABILITY_HISTOGRAM_COUNTS_100(
+ "Stability.Internals.VersionMismatchCount", pref_value);
+ }
+
+#if defined(OS_WIN)
+ if (GetPrefValue(prefs::kStabilitySystemCrashCount, &pref_value)) {
+ UMA_STABILITY_HISTOGRAM_COUNTS_100("Stability.Internals.SystemCrashCount",
+ pref_value);
+ }
+#endif
+}
+
+void StabilityMetricsProvider::RecordBreakpadRegistration(bool success) {
+ if (!success)
+ IncrementPrefValue(prefs::kStabilityBreakpadRegistrationFail);
+ else
+ IncrementPrefValue(prefs::kStabilityBreakpadRegistrationSuccess);
+}
+
+void StabilityMetricsProvider::RecordBreakpadHasDebugger(bool has_debugger) {
+ if (!has_debugger)
+ IncrementPrefValue(prefs::kStabilityDebuggerNotPresent);
+ else
+ IncrementPrefValue(prefs::kStabilityDebuggerPresent);
+}
+
+void StabilityMetricsProvider::CheckLastSessionEndCompleted() {
+ if (!local_state_->GetBoolean(prefs::kStabilitySessionEndCompleted)) {
+ IncrementPrefValue(prefs::kStabilityIncompleteSessionEndCount);
+ // This is marked false when we get a WM_ENDSESSION.
+ MarkSessionEndCompleted(true);
+ }
+}
+
+void StabilityMetricsProvider::MarkSessionEndCompleted(bool end_completed) {
+ local_state_->SetBoolean(prefs::kStabilitySessionEndCompleted, end_completed);
+}
+
+void StabilityMetricsProvider::LogCrash(base::Time last_live_timestamp) {
+ IncrementPrefValue(prefs::kStabilityCrashCount);
+
+#if defined(OS_ANDROID)
+  // On Android, if GMS core is updated while Chrome is running, Chrome will be
+  // killed and restarted. This is expected, so only report a crash if the GMS
+  // core version has not changed.
+ if (!HasGmsCoreVersionChanged(local_state_))
+ IncrementPrefValue(prefs::kStabilityCrashCountWithoutGmsCoreUpdate);
+#endif
+
+#if defined(OS_WIN)
+ MaybeLogSystemCrash(last_live_timestamp);
+#endif
+}
+
+void StabilityMetricsProvider::LogStabilityLogDeferred() {
+ IncrementPrefValue(prefs::kStabilityDeferredCount);
+}
+
+void StabilityMetricsProvider::LogStabilityDataDiscarded() {
+ IncrementPrefValue(prefs::kStabilityDiscardCount);
+}
+
+void StabilityMetricsProvider::LogLaunch() {
+ IncrementPrefValue(prefs::kStabilityLaunchCount);
+}
+
+void StabilityMetricsProvider::LogStabilityVersionMismatch() {
+ IncrementPrefValue(prefs::kStabilityVersionMismatchCount);
+}
+
+#if defined(OS_WIN)
+bool StabilityMetricsProvider::IsUncleanSystemSession(
+ base::Time last_live_timestamp) {
+ DCHECK_NE(base::Time(), last_live_timestamp);
+  // There's a non-null last live timestamp; see if it occurred in a Windows
+  // system session that ended uncleanly. The expectation is that
+  // |last_live_timestamp| will have occurred in the immediately previous system
+  // session, but if the system has been restarted many times since Chrome last
+  // ran, that's not necessarily true. Log traversal can be expensive, so the
+  // analyzer is limited to reaching back three previous system sessions to
+  // bound the cost of the traversal.
+ SystemSessionAnalyzer analyzer(3);
+
+ SystemSessionAnalyzer::Status status =
+ analyzer.IsSessionUnclean(last_live_timestamp);
+
+ return status == SystemSessionAnalyzer::UNCLEAN;
+}
+
+void StabilityMetricsProvider::MaybeLogSystemCrash(
+ base::Time last_live_timestamp) {
+ if (last_live_timestamp != base::Time() &&
+ IsUncleanSystemSession(last_live_timestamp)) {
+ IncrementPrefValue(prefs::kStabilitySystemCrashCount);
+ }
+}
+#endif
+
+void StabilityMetricsProvider::IncrementPrefValue(const char* path) {
+ int value = local_state_->GetInteger(path);
+ local_state_->SetInteger(path, value + 1);
+}
+
+int StabilityMetricsProvider::GetPrefValue(const char* path, int* value) {
+ *value = local_state_->GetInteger(path);
+ if (*value != 0)
+ local_state_->SetInteger(path, 0);
+ return *value;
+}
+
+} // namespace metrics
diff --git a/components/metrics/stability_metrics_provider.h b/components/metrics/stability_metrics_provider.h
new file mode 100644
index 0000000..3aeff63
--- /dev/null
+++ b/components/metrics/stability_metrics_provider.h
@@ -0,0 +1,67 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_STABILITY_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_STABILITY_METRICS_PROVIDER_H_
+
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "components/metrics/metrics_provider.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace metrics {
+
+class SystemProfileProto;
+
+// Stores and loads system stability information to and from prefs for use in
+// stability logs.
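+//
+// Sketch of typical use, based on the unit tests for this class (|registry|,
+// |local_state|, |last_live_timestamp|, and |system_profile| are supplied by
+// the embedder):
+//
+//   StabilityMetricsProvider::RegisterPrefs(registry);
+//   StabilityMetricsProvider stability(local_state);
+//   stability.LogLaunch();
+//   stability.LogCrash(last_live_timestamp);
+//   ...
+//   MetricsProvider* provider = &stability;
+//   provider->ProvideStabilityMetrics(&system_profile);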
+class StabilityMetricsProvider : public MetricsProvider {
+ public:
+ StabilityMetricsProvider(PrefService* local_state);
+ ~StabilityMetricsProvider() override;
+
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ void RecordBreakpadRegistration(bool success);
+ void RecordBreakpadHasDebugger(bool has_debugger);
+
+ void CheckLastSessionEndCompleted();
+ void MarkSessionEndCompleted(bool end_completed);
+
+ void LogCrash(base::Time last_live_timestamp);
+ void LogStabilityLogDeferred();
+ void LogStabilityDataDiscarded();
+ void LogLaunch();
+ void LogStabilityVersionMismatch();
+
+ private:
+#if defined(OS_WIN)
+  // This function is virtual for testing. |last_live_timestamp| is a time
+  // point at which the previous browser instance was known to be alive, and is
+  // used to determine whether the system session containing that timestamp
+  // terminated uncleanly.
+ virtual bool IsUncleanSystemSession(base::Time last_live_timestamp);
+ void MaybeLogSystemCrash(base::Time last_live_timestamp);
+#endif
+ // Increments an Integer pref value specified by |path|.
+ void IncrementPrefValue(const char* path);
+
+ // Gets pref value specified by |path| and resets it to 0 after retrieving.
+ int GetPrefValue(const char* path, int* value);
+
+ // MetricsProvider:
+ void Init() override;
+ void ClearSavedStabilityMetrics() override;
+ void ProvideStabilityMetrics(
+ SystemProfileProto* system_profile_proto) override;
+
+ PrefService* local_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(StabilityMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_STABILITY_METRICS_PROVIDER_H_
diff --git a/components/metrics/stability_metrics_provider_unittest.cc b/components/metrics/stability_metrics_provider_unittest.cc
new file mode 100644
index 0000000..e0ba0ce
--- /dev/null
+++ b/components/metrics/stability_metrics_provider_unittest.cc
@@ -0,0 +1,132 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/stability_metrics_provider.h"
+
+#include "base/test/metrics/histogram_tester.h"
+#include "build/build_config.h"
+#include "components/prefs/testing_pref_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace metrics {
+
+class StabilityMetricsProviderTest : public testing::Test {
+ public:
+ StabilityMetricsProviderTest() {
+ StabilityMetricsProvider::RegisterPrefs(prefs_.registry());
+ }
+
+ ~StabilityMetricsProviderTest() override {}
+
+ protected:
+ TestingPrefServiceSimple prefs_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StabilityMetricsProviderTest);
+};
+
+TEST_F(StabilityMetricsProviderTest, ProvideStabilityMetrics) {
+ StabilityMetricsProvider stability_provider(&prefs_);
+ MetricsProvider* provider = &stability_provider;
+ SystemProfileProto system_profile;
+ provider->ProvideStabilityMetrics(&system_profile);
+
+ const SystemProfileProto_Stability& stability = system_profile.stability();
+ // Initial log metrics: only expected if non-zero.
+ EXPECT_FALSE(stability.has_launch_count());
+ EXPECT_FALSE(stability.has_crash_count());
+ EXPECT_FALSE(stability.has_incomplete_shutdown_count());
+ EXPECT_FALSE(stability.has_breakpad_registration_success_count());
+ EXPECT_FALSE(stability.has_breakpad_registration_failure_count());
+ EXPECT_FALSE(stability.has_debugger_present_count());
+ EXPECT_FALSE(stability.has_debugger_not_present_count());
+}
+
+TEST_F(StabilityMetricsProviderTest, RecordStabilityMetrics) {
+ {
+ StabilityMetricsProvider recorder(&prefs_);
+ recorder.LogLaunch();
+ recorder.LogCrash(base::Time());
+ recorder.MarkSessionEndCompleted(false);
+ recorder.CheckLastSessionEndCompleted();
+ recorder.RecordBreakpadRegistration(true);
+ recorder.RecordBreakpadRegistration(false);
+ recorder.RecordBreakpadHasDebugger(true);
+ recorder.RecordBreakpadHasDebugger(false);
+ }
+
+ {
+ StabilityMetricsProvider stability_provider(&prefs_);
+ MetricsProvider* provider = &stability_provider;
+ SystemProfileProto system_profile;
+ provider->ProvideStabilityMetrics(&system_profile);
+
+ const SystemProfileProto_Stability& stability = system_profile.stability();
+ // Initial log metrics: only expected if non-zero.
+ EXPECT_EQ(1, stability.launch_count());
+ EXPECT_EQ(1, stability.crash_count());
+ EXPECT_EQ(1, stability.incomplete_shutdown_count());
+ EXPECT_EQ(1, stability.breakpad_registration_success_count());
+ EXPECT_EQ(1, stability.breakpad_registration_failure_count());
+ EXPECT_EQ(1, stability.debugger_present_count());
+ EXPECT_EQ(1, stability.debugger_not_present_count());
+ }
+}
+
+#if defined(OS_WIN)
+namespace {
+
+class TestingStabilityMetricsProvider : public StabilityMetricsProvider {
+ public:
+ TestingStabilityMetricsProvider(PrefService* local_state,
+ base::Time unclean_session_time)
+ : StabilityMetricsProvider(local_state),
+ unclean_session_time_(unclean_session_time) {}
+
+ bool IsUncleanSystemSession(base::Time last_live_timestamp) override {
+ return last_live_timestamp == unclean_session_time_;
+ }
+
+ private:
+ const base::Time unclean_session_time_;
+};
+
+} // namespace
+
+TEST_F(StabilityMetricsProviderTest, RecordSystemCrashMetrics) {
+ {
+ base::Time unclean_time = base::Time::Now();
+ TestingStabilityMetricsProvider recorder(&prefs_, unclean_time);
+
+    // Per the TestingStabilityMetricsProvider implementation above, any crash
+    // with a last_live_timestamp equal to unclean_time is treated as a system
+    // crash, so this call logs one.
+ recorder.LogCrash(unclean_time);
+
+ // Record a crash with no system crash.
+ recorder.LogCrash(unclean_time - base::TimeDelta::FromMinutes(1));
+ }
+
+ {
+ StabilityMetricsProvider stability_provider(&prefs_);
+ MetricsProvider* provider = &stability_provider;
+ SystemProfileProto system_profile;
+
+ base::HistogramTester histogram_tester;
+
+ provider->ProvideStabilityMetrics(&system_profile);
+
+ const SystemProfileProto_Stability& stability = system_profile.stability();
+ // Two crashes, one system crash.
+ EXPECT_EQ(2, stability.crash_count());
+
+ histogram_tester.ExpectTotalCount("Stability.Internals.SystemCrashCount",
+ 1);
+ }
+}
+
+#endif
+
+} // namespace metrics
diff --git a/components/metrics/system_memory_stats_recorder.h b/components/metrics/system_memory_stats_recorder.h
new file mode 100644
index 0000000..bdd30f0
--- /dev/null
+++ b/components/metrics/system_memory_stats_recorder.h
@@ -0,0 +1,30 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_SYSTEM_MEMORY_STATS_RECORDER_H_
+#define COMPONENTS_METRICS_SYSTEM_MEMORY_STATS_RECORDER_H_
+
+namespace metrics {
+
+// Record a memory size in megabytes, over a potential interval up to 32 GB.
+#define UMA_HISTOGRAM_LARGE_MEMORY_MB(name, sample) \
+ UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 32768, 50)
+
+// The type of memory UMA stats to be recorded in RecordMemoryStats.
+enum RecordMemoryStatsType {
+ // When a tab was discarded.
+ RECORD_MEMORY_STATS_TAB_DISCARDED,
+
+ // Right after the renderer for contents was killed.
+ RECORD_MEMORY_STATS_CONTENTS_OOM_KILLED,
+
+ // Right after the renderer for extensions was killed.
+ RECORD_MEMORY_STATS_EXTENSIONS_OOM_KILLED,
+};
+
+void RecordMemoryStats(RecordMemoryStatsType type);
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_SYSTEM_MEMORY_STATS_RECORDER_H_
diff --git a/components/metrics/system_memory_stats_recorder_linux.cc b/components/metrics/system_memory_stats_recorder_linux.cc
new file mode 100644
index 0000000..c69dbaa
--- /dev/null
+++ b/components/metrics/system_memory_stats_recorder_linux.cc
@@ -0,0 +1,98 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/system_memory_stats_recorder.h"
+
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process_metrics.h"
+#include "build/build_config.h"
+
+namespace metrics {
+
+// Records a size in megabytes, in a potential interval from 250 MB to 32 GB.
+#define UMA_HISTOGRAM_ALLOCATED_MEGABYTES(name, sample) \
+ UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 250, 32768, 50)
+
+// Records a statistical |sample| for the UMA histogram |name| using a linear
+// distribution of buckets.
+#define UMA_HISTOGRAM_LINEAR(name, sample, max, buckets) \
+ STATIC_HISTOGRAM_POINTER_BLOCK( \
+ name, Add(sample), \
+ base::LinearHistogram::FactoryGet( \
+ name, \
+ 1, /* Minimum. The 0 bin for underflow is automatically added. */ \
+ max + 1, /* Ensure bucket size of |maximum| / |bucket_count|. */ \
+ buckets + 2, /* Account for the underflow and overflow bins. */ \
+ base::Histogram::kUmaTargetedHistogramFlag))
+
+#define UMA_HISTOGRAM_MEGABYTES_LINEAR(name, sample) \
+ UMA_HISTOGRAM_LINEAR(name, sample, 2500, 50)
+
+void RecordMemoryStats(RecordMemoryStatsType type) {
+ base::SystemMemoryInfoKB memory;
+ if (!base::GetSystemMemoryInfo(&memory))
+ return;
+#if defined(OS_CHROMEOS)
+ // Record graphics GEM object size in a histogram with 50 MB buckets.
+ int mem_graphics_gem_mb = 0;
+ if (memory.gem_size != -1)
+ mem_graphics_gem_mb = memory.gem_size / 1024 / 1024;
+
+ // Record shared memory (used by renderer/GPU buffers).
+ int mem_shmem_mb = memory.shmem / 1024;
+#endif
+
+ // On Intel, graphics objects are in anonymous pages, but on ARM they are
+ // not. For a total "allocated count" add in graphics pages on ARM.
+ int mem_allocated_mb = (memory.active_anon + memory.inactive_anon) / 1024;
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
+ mem_allocated_mb += mem_graphics_gem_mb;
+#endif
+
+ int mem_available_mb =
+ (memory.active_file + memory.inactive_file + memory.free) / 1024;
+
+ switch (type) {
+ case RECORD_MEMORY_STATS_TAB_DISCARDED: {
+#if defined(OS_CHROMEOS)
+ UMA_HISTOGRAM_MEGABYTES_LINEAR("Tabs.Discard.MemGraphicsMB",
+ mem_graphics_gem_mb);
+ UMA_HISTOGRAM_MEGABYTES_LINEAR("Tabs.Discard.MemShmemMB", mem_shmem_mb);
+#endif
+ UMA_HISTOGRAM_ALLOCATED_MEGABYTES("Tabs.Discard.MemAllocatedMB",
+ mem_allocated_mb);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Tabs.Discard.MemAvailableMB",
+ mem_available_mb);
+ break;
+ }
+ case RECORD_MEMORY_STATS_CONTENTS_OOM_KILLED: {
+#if defined(OS_CHROMEOS)
+ UMA_HISTOGRAM_MEGABYTES_LINEAR("Memory.OOMKill.Contents.MemGraphicsMB",
+ mem_graphics_gem_mb);
+ UMA_HISTOGRAM_MEGABYTES_LINEAR("Memory.OOMKill.Contents.MemShmemMB",
+ mem_shmem_mb);
+#endif
+ UMA_HISTOGRAM_ALLOCATED_MEGABYTES(
+ "Memory.OOMKill.Contents.MemAllocatedMB", mem_allocated_mb);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Memory.OOMKill.Contents.MemAvailableMB",
+ mem_available_mb);
+ break;
+ }
+ case RECORD_MEMORY_STATS_EXTENSIONS_OOM_KILLED: {
+#if defined(OS_CHROMEOS)
+ UMA_HISTOGRAM_MEGABYTES_LINEAR("Memory.OOMKill.Extensions.MemGraphicsMB",
+ mem_graphics_gem_mb);
+ UMA_HISTOGRAM_MEGABYTES_LINEAR("Memory.OOMKill.Extensions.MemShmemMB",
+ mem_shmem_mb);
+#endif
+ UMA_HISTOGRAM_ALLOCATED_MEGABYTES(
+ "Memory.OOMKill.Extensions.MemAllocatedMB", mem_allocated_mb);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Memory.OOMKill.Extensions.MemAvailableMB",
+ mem_available_mb);
+ break;
+ }
+ }
+}
+
+} // namespace metrics
diff --git a/components/metrics/system_memory_stats_recorder_win.cc b/components/metrics/system_memory_stats_recorder_win.cc
new file mode 100644
index 0000000..7d01182
--- /dev/null
+++ b/components/metrics/system_memory_stats_recorder_win.cc
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/system_memory_stats_recorder.h"
+
+#include <windows.h>
+
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process_metrics.h"
+
+namespace metrics {
+namespace {
+enum { kMBytes = 1024 * 1024 };
+}
+
+void RecordMemoryStats(RecordMemoryStatsType type) {
+ MEMORYSTATUSEX mem_status;
+ mem_status.dwLength = sizeof(mem_status);
+ if (!::GlobalMemoryStatusEx(&mem_status))
+ return;
+
+ switch (type) {
+ case RECORD_MEMORY_STATS_TAB_DISCARDED: {
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Memory.Stats.Win.MemoryLoad",
+ mem_status.dwMemoryLoad, 1, 100, 101);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Memory.Stats.Win.TotalPhys2",
+ mem_status.ullTotalPhys / kMBytes);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Memory.Stats.Win.AvailPhys2",
+ mem_status.ullAvailPhys / kMBytes);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Memory.Stats.Win.TotalPageFile2",
+ mem_status.ullTotalPageFile / kMBytes);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Memory.Stats.Win.AvailPageFile2",
+ mem_status.ullAvailPageFile / kMBytes);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Memory.Stats.Win.TotalVirtual2",
+ mem_status.ullTotalVirtual / kMBytes);
+ UMA_HISTOGRAM_LARGE_MEMORY_MB("Memory.Stats.Win.AvailVirtual2",
+ mem_status.ullAvailVirtual / kMBytes);
+ break;
+ }
+ default:
+ NOTREACHED() << "Received unexpected notification";
+ break;
+ }
+}
+
+} // namespace metrics
diff --git a/components/metrics/system_session_analyzer_win.cc b/components/metrics/system_session_analyzer_win.cc
new file mode 100644
index 0000000..e64501f
--- /dev/null
+++ b/components/metrics/system_session_analyzer_win.cc
@@ -0,0 +1,263 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/system_session_analyzer_win.h"
+
+#include "base/macros.h"
+#include "base/time/time.h"
+
+namespace metrics {
+
+namespace {
+
+// The name of the log channel to query.
+const wchar_t kChannelName[] = L"System";
+
+// Event ids of system startup / shutdown events. These were obtained from
+// inspection of the System log in Event Viewer on Windows 10:
+// - id 6005: "The Event log service was started."
+// - id 6006: "The Event log service was stopped."
+// - id 6008: "The previous system shutdown at <time> on <date> was
+// unexpected."
+const uint16_t kIdSessionStart = 6005U;
+const uint16_t kIdSessionEnd = 6006U;
+const uint16_t kIdSessionEndUnclean = 6008U;
+
+// An XPATH expression to query for system startup / shutdown events. The query
+// is expected to retrieve exactly one event for each startup (kIdSessionStart)
+// and one event for each shutdown (either kIdSessionEnd or
+// kIdSessionEndUnclean).
+const wchar_t kSessionEventsQuery[] =
+ L"*[System[Provider[@Name='eventlog']"
+ L" and (EventID=6005 or EventID=6006 or EventID=6008)]]";
+
+// XPath expressions to attributes of interest.
+const wchar_t kEventIdPath[] = L"Event/System/EventID";
+const wchar_t kEventTimePath[] = L"Event/System/TimeCreated/@SystemTime";
+
+// The timeout to use for calls to ::EvtNext.
+const uint32_t kTimeoutMs = 5000;
+
+base::Time ULLFileTimeToTime(ULONGLONG time_ulonglong) {
+  // Copy low / high parts as FILETIME is not always 64-bit aligned.
+ ULARGE_INTEGER time;
+ time.QuadPart = time_ulonglong;
+ FILETIME ft;
+ ft.dwLowDateTime = time.LowPart;
+ ft.dwHighDateTime = time.HighPart;
+
+ return base::Time::FromFileTime(ft);
+}
+
+bool GetEventInfo(EVT_HANDLE context,
+ EVT_HANDLE event,
+ SystemSessionAnalyzer::EventInfo* info) {
+ DCHECK(context);
+ DCHECK(event);
+ DCHECK(info);
+
+ // Retrieve attributes of interest from the event. We expect the context to
+ // specify the retrieval of two attributes (event id and event time), each
+ // with a specific type.
+ const DWORD kAttributeCnt = 2U;
+ std::vector<EVT_VARIANT> buffer(kAttributeCnt);
+ DWORD buffer_size = kAttributeCnt * sizeof(EVT_VARIANT);
+ DWORD buffer_used = 0U;
+ DWORD retrieved_attribute_cnt = 0U;
+ if (!::EvtRender(context, event, EvtRenderEventValues, buffer_size,
+ buffer.data(), &buffer_used, &retrieved_attribute_cnt)) {
+ DLOG(ERROR) << "Failed to render the event.";
+ return false;
+ }
+
+ // Validate the count and types of the retrieved attributes.
+ if ((retrieved_attribute_cnt != kAttributeCnt) ||
+ (buffer[0].Type != EvtVarTypeUInt16) ||
+ (buffer[1].Type != EvtVarTypeFileTime)) {
+ return false;
+ }
+
+ info->event_id = buffer[0].UInt16Val;
+ info->event_time = ULLFileTimeToTime(buffer[1].FileTimeVal);
+
+ return true;
+}
+
+} // namespace
+
+SystemSessionAnalyzer::SystemSessionAnalyzer(uint32_t max_session_cnt)
+ : max_session_cnt_(max_session_cnt), sessions_queried_(0) {}
+
+SystemSessionAnalyzer::~SystemSessionAnalyzer() {}
+
+SystemSessionAnalyzer::Status SystemSessionAnalyzer::IsSessionUnclean(
+ base::Time timestamp) {
+ if (!EnsureInitialized())
+ return FAILED;
+
+ while (timestamp < coverage_start_ && sessions_queried_ < max_session_cnt_) {
+ // Fetch the next session start and end events.
+ std::vector<EventInfo> events;
+ if (!FetchEvents(2U, &events) || events.size() != 2)
+ return FAILED;
+
+ if (!ProcessSession(events[0], events[1]))
+ return FAILED;
+
+ ++sessions_queried_;
+ }
+
+ if (timestamp < coverage_start_)
+ return OUTSIDE_RANGE;
+
+ // Get the first session starting after the timestamp.
+ std::map<base::Time, base::TimeDelta>::const_iterator it =
+ unclean_sessions_.upper_bound(timestamp);
+ if (it == unclean_sessions_.begin())
+ return CLEAN; // No prior unclean session.
+
+ // Get the previous session and see if it encompasses the timestamp.
+ --it;
+ bool is_spanned = (timestamp - it->first) <= it->second;
+ return is_spanned ? UNCLEAN : CLEAN;
+}
+
+bool SystemSessionAnalyzer::FetchEvents(size_t requested_events,
+ std::vector<EventInfo>* event_infos) {
+ DCHECK(event_infos);
+
+ if (!EnsureHandlesOpened())
+ return false;
+
+ DCHECK(query_handle_.get());
+
+ // Retrieve events: 2 events per session, plus the current session's start.
+ DWORD desired_event_cnt = requested_events;
+ std::vector<EVT_HANDLE> events_raw(desired_event_cnt, NULL);
+ DWORD event_cnt = 0U;
+ BOOL success = ::EvtNext(query_handle_.get(), desired_event_cnt,
+ events_raw.data(), kTimeoutMs, 0, &event_cnt);
+
+  // Ensure handles get closed. The MSDN sample seems to imply handles may need
+  // to be closed even if EvtNext fails.
+ std::vector<EvtHandle> events(desired_event_cnt);
+ for (size_t i = 0; i < event_cnt; ++i)
+ events[i].reset(events_raw[i]);
+
+ if (!success) {
+ DLOG(ERROR) << "Failed to retrieve events.";
+ return false;
+ }
+
+ std::vector<EventInfo> event_infos_tmp;
+ event_infos_tmp.reserve(event_cnt);
+
+ EventInfo info = {};
+ for (size_t i = 0; i < event_cnt; ++i) {
+ if (!GetEventInfo(render_context_.get(), events[i].get(), &info))
+ return false;
+ event_infos_tmp.push_back(info);
+ }
+
+ event_infos->swap(event_infos_tmp);
+ return true;
+}
+
+bool SystemSessionAnalyzer::EnsureInitialized() {
+ if (!initialized_) {
+ DCHECK(!init_success_);
+ init_success_ = Initialize();
+ initialized_ = true;
+ }
+
+ return init_success_;
+}
+
+bool SystemSessionAnalyzer::EnsureHandlesOpened() {
+ // Create the event query.
+ // Note: requesting events from newest to oldest.
+ if (!query_handle_.get()) {
+ query_handle_.reset(
+ ::EvtQuery(nullptr, kChannelName, kSessionEventsQuery,
+ EvtQueryChannelPath | EvtQueryReverseDirection));
+ if (!query_handle_.get()) {
+ DLOG(ERROR) << "Event query failed.";
+ return false;
+ }
+ }
+
+ if (!render_context_.get()) {
+ // Create the render context for extracting information from the events.
+ render_context_ = CreateRenderContext();
+ if (!render_context_.get())
+ return false;
+ }
+
+ return true;
+}
+
+bool SystemSessionAnalyzer::Initialize() {
+ DCHECK(!initialized_);
+
+  // Fetch the current session's start event, plus the previous session's end
+  // and start events, for a total of 3 events.
+ std::vector<EventInfo> events;
+ if (!FetchEvents(3U, &events))
+ return false;
+
+ // Validate that the initial event is what we expect.
+ if (events.size() != 3 || events[0].event_id != kIdSessionStart)
+ return false;
+
+ // Initialize the coverage start to allow detecting event time inversion.
+ coverage_start_ = events[0].event_time;
+
+ if (!ProcessSession(events[1], events[2]))
+ return false;
+
+ sessions_queried_ = 1;
+
+ return true;
+}
+
+bool SystemSessionAnalyzer::ProcessSession(const EventInfo& end,
+ const EventInfo& start) {
+ // Validate the ordering of events (newest to oldest). The expectation is a
+ // (start / [unclean]shutdown) pair of events for each session.
+ if (coverage_start_ < end.event_time)
+ return false;
+ if (end.event_time < start.event_time)
+ return false;
+
+ // Process a (start / shutdown) event pair, validating the types of events
+ // and recording unclean sessions.
+ if (start.event_id != kIdSessionStart)
+ return false; // Unexpected event type.
+ if (end.event_id != kIdSessionEnd && end.event_id != kIdSessionEndUnclean)
+ return false; // Unexpected event type.
+
+ if (end.event_id == kIdSessionEndUnclean) {
+ unclean_sessions_.insert(
+ std::make_pair(start.event_time, end.event_time - start.event_time));
+ }
+
+ coverage_start_ = start.event_time;
+
+ return true;
+}
+
+SystemSessionAnalyzer::EvtHandle SystemSessionAnalyzer::CreateRenderContext() {
+ LPCWSTR value_paths[] = {kEventIdPath, kEventTimePath};
+ const DWORD kValueCnt = arraysize(value_paths);
+
+ EVT_HANDLE context = nullptr;
+ context =
+ ::EvtCreateRenderContext(kValueCnt, value_paths, EvtRenderContextValues);
+ if (!context)
+ DLOG(ERROR) << "Failed to create render context.";
+
+ return EvtHandle(context);
+}
+
+} // namespace metrics
diff --git a/components/metrics/system_session_analyzer_win.h b/components/metrics/system_session_analyzer_win.h
new file mode 100644
index 0000000..2bd83e1
--- /dev/null
+++ b/components/metrics/system_session_analyzer_win.h
@@ -0,0 +1,102 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_BROWSER_WATCHER_SYSTEM_SESSION_ANALYZER_WIN_H_
+#define COMPONENTS_BROWSER_WATCHER_SYSTEM_SESSION_ANALYZER_WIN_H_
+
+#include <windows.h>
+#include <winevt.h>
+
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "base/time/time.h"
+
+namespace metrics {
+
+// Analyzes system session events for unclean sessions. Initialization is
+// expensive and therefore done lazily, as the analyzer is instantiated before
+// knowing whether it will be used.
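+//
+// Illustrative usage sketch (not part of the imported source; the names below
+// mirror the API declared in this class):
+//   SystemSessionAnalyzer analyzer(5 /* max sessions to inspect */);
+//   if (analyzer.IsSessionUnclean(some_timestamp) ==
+//       SystemSessionAnalyzer::UNCLEAN) {
+//     // The session containing |some_timestamp| ended in an unclean shutdown.
+//   }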
+class SystemSessionAnalyzer {
+ public:
+ enum Status {
+ FAILED = 0,
+ CLEAN = 1,
+ UNCLEAN = 2,
+ OUTSIDE_RANGE = 3,
+ };
+
+ // Minimal information about a log event.
+ struct EventInfo {
+ uint16_t event_id;
+ base::Time event_time;
+ };
+
+ // Creates a SystemSessionAnalyzer that will analyze system sessions based on
+ // events pertaining to as many as |max_session_cnt| of the most recent system
+ // sessions.
+ explicit SystemSessionAnalyzer(uint32_t max_session_cnt);
+ virtual ~SystemSessionAnalyzer();
+
+ // Returns an analysis status for the system session that contains
+ // |timestamp|.
+ virtual Status IsSessionUnclean(base::Time timestamp);
+
+ protected:
+ // Queries for the next |requested_events|. On success, returns true and
+ // |event_infos| contains up to |requested_events| events ordered from newest
+ // to oldest.
+ // Returns false otherwise. Virtual for unit testing.
+ virtual bool FetchEvents(size_t requested_events,
+ std::vector<EventInfo>* event_infos);
+
+ private:
+ struct EvtHandleCloser {
+ using pointer = EVT_HANDLE;
+ void operator()(EVT_HANDLE handle) const {
+ if (handle)
+ ::EvtClose(handle);
+ }
+ };
+ using EvtHandle = std::unique_ptr<EVT_HANDLE, EvtHandleCloser>;
+
+ FRIEND_TEST_ALL_PREFIXES(SystemSessionAnalyzerTest, FetchEvents);
+
+ bool EnsureInitialized();
+ bool EnsureHandlesOpened();
+ bool Initialize();
+ // Validates that |end| and |start| have sane event IDs and event times.
+ // Updates |coverage_start_| and adds the session to unclean_sessions_
+ // as appropriate.
+ bool ProcessSession(const EventInfo& end, const EventInfo& start);
+
+ EvtHandle CreateRenderContext();
+
+ // The maximal number of sessions to query events for.
+ uint32_t max_session_cnt_;
+ uint32_t sessions_queried_;
+
+ bool initialized_ = false;
+ bool init_success_ = false;
+
+ // A handle to the query, valid after a successful initialize.
+ EvtHandle query_handle_;
+ // A handle to the event render context, valid after a successful initialize.
+ EvtHandle render_context_;
+
+ // Information about unclean sessions: start time to session duration.
+ std::map<base::Time, base::TimeDelta> unclean_sessions_;
+
+ // Timestamp of the oldest event.
+ base::Time coverage_start_;
+
+ DISALLOW_COPY_AND_ASSIGN(SystemSessionAnalyzer);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_BROWSER_WATCHER_SYSTEM_SESSION_ANALYZER_WIN_H_
diff --git a/components/metrics/system_session_analyzer_win_unittest.cc b/components/metrics/system_session_analyzer_win_unittest.cc
new file mode 100644
index 0000000..bd7b1f8
--- /dev/null
+++ b/components/metrics/system_session_analyzer_win_unittest.cc
@@ -0,0 +1,125 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/system_session_analyzer_win.h"
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+
+namespace {
+
+const uint16_t kIdSessionStart = 6005U;
+const uint16_t kIdSessionEnd = 6006U;
+const uint16_t kIdSessionEndUnclean = 6008U;
+
+} // namespace
+
+// Ensure the fetcher retrieves events.
+TEST(SystemSessionAnalyzerTest, FetchEvents) {
+ SystemSessionAnalyzer analyzer(0);
+ std::vector<SystemSessionAnalyzer::EventInfo> events;
+ ASSERT_TRUE(analyzer.FetchEvents(1U, &events));
+ EXPECT_EQ(1U, events.size());
+}
+
+// Ensure the fetcher's retrieved events conform to our expectations.
+// Note: this test fails if the host system doesn't have at least 1 prior
+// session.
+TEST(SystemSessionAnalyzerTest, ValidateEvents) {
+ SystemSessionAnalyzer analyzer(1U);
+ EXPECT_EQ(SystemSessionAnalyzer::CLEAN,
+ analyzer.IsSessionUnclean(base::Time::Now()));
+}
+
+// Stubs FetchEvents.
+class StubSystemSessionAnalyzer : public SystemSessionAnalyzer {
+ public:
+ StubSystemSessionAnalyzer(uint32_t max_session_cnt)
+ : SystemSessionAnalyzer(max_session_cnt) {}
+
+ bool FetchEvents(size_t requested_events,
+ std::vector<EventInfo>* event_infos) override {
+ DCHECK(event_infos);
+ size_t num_to_copy = std::min(requested_events, events_.size());
+ if (num_to_copy) {
+ event_infos->clear();
+ event_infos->insert(event_infos->begin(), events_.begin(),
+ events_.begin() + num_to_copy);
+ events_.erase(events_.begin(), events_.begin() + num_to_copy);
+ }
+
+ return true;
+ }
+
+ void AddEvent(const EventInfo& info) { events_.push_back(info); }
+
+ private:
+ std::vector<EventInfo> events_;
+};
+
+TEST(SystemSessionAnalyzerTest, StandardCase) {
+ StubSystemSessionAnalyzer analyzer(2U);
+
+ base::Time time = base::Time::Now();
+ analyzer.AddEvent({kIdSessionStart, time});
+ analyzer.AddEvent(
+ {kIdSessionEndUnclean, time - base::TimeDelta::FromSeconds(10)});
+ analyzer.AddEvent({kIdSessionStart, time - base::TimeDelta::FromSeconds(20)});
+ analyzer.AddEvent({kIdSessionEnd, time - base::TimeDelta::FromSeconds(22)});
+ analyzer.AddEvent({kIdSessionStart, time - base::TimeDelta::FromSeconds(28)});
+
+ EXPECT_EQ(SystemSessionAnalyzer::OUTSIDE_RANGE,
+ analyzer.IsSessionUnclean(time - base::TimeDelta::FromSeconds(30)));
+ EXPECT_EQ(SystemSessionAnalyzer::CLEAN,
+ analyzer.IsSessionUnclean(time - base::TimeDelta::FromSeconds(25)));
+ EXPECT_EQ(SystemSessionAnalyzer::UNCLEAN,
+ analyzer.IsSessionUnclean(time - base::TimeDelta::FromSeconds(20)));
+ EXPECT_EQ(SystemSessionAnalyzer::UNCLEAN,
+ analyzer.IsSessionUnclean(time - base::TimeDelta::FromSeconds(15)));
+ EXPECT_EQ(SystemSessionAnalyzer::UNCLEAN,
+ analyzer.IsSessionUnclean(time - base::TimeDelta::FromSeconds(10)));
+ EXPECT_EQ(SystemSessionAnalyzer::CLEAN,
+ analyzer.IsSessionUnclean(time - base::TimeDelta::FromSeconds(5)));
+ EXPECT_EQ(SystemSessionAnalyzer::CLEAN,
+ analyzer.IsSessionUnclean(time + base::TimeDelta::FromSeconds(5)));
+}
+
+TEST(SystemSessionAnalyzerTest, NoEvent) {
+ StubSystemSessionAnalyzer analyzer(0U);
+ EXPECT_EQ(SystemSessionAnalyzer::FAILED,
+ analyzer.IsSessionUnclean(base::Time::Now()));
+}
+
+TEST(SystemSessionAnalyzerTest, TimeInversion) {
+ StubSystemSessionAnalyzer analyzer(1U);
+
+ base::Time time = base::Time::Now();
+ analyzer.AddEvent({kIdSessionStart, time});
+ analyzer.AddEvent({kIdSessionEnd, time + base::TimeDelta::FromSeconds(1)});
+ analyzer.AddEvent({kIdSessionStart, time - base::TimeDelta::FromSeconds(1)});
+
+ EXPECT_EQ(SystemSessionAnalyzer::FAILED,
+ analyzer.IsSessionUnclean(base::Time::Now()));
+}
+
+TEST(SystemSessionAnalyzerTest, IdInversion) {
+ StubSystemSessionAnalyzer analyzer(1U);
+
+ base::Time time = base::Time::Now();
+ analyzer.AddEvent({kIdSessionStart, time});
+ analyzer.AddEvent({kIdSessionStart, time - base::TimeDelta::FromSeconds(1)});
+ analyzer.AddEvent({kIdSessionEnd, time - base::TimeDelta::FromSeconds(2)});
+
+ EXPECT_EQ(SystemSessionAnalyzer::FAILED,
+ analyzer.IsSessionUnclean(base::Time::Now()));
+}
+
+} // namespace metrics
diff --git a/components/metrics/test_enabled_state_provider.cc b/components/metrics/test_enabled_state_provider.cc
new file mode 100644
index 0000000..d8c59e4
--- /dev/null
+++ b/components/metrics/test_enabled_state_provider.cc
@@ -0,0 +1,17 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/test_enabled_state_provider.h"
+
+namespace metrics {
+
+bool TestEnabledStateProvider::IsConsentGiven() const {
+ return consent_;
+}
+
+bool TestEnabledStateProvider::IsReportingEnabled() const {
+ return enabled_;
+}
+
+} // namespace metrics
diff --git a/components/metrics/test_enabled_state_provider.h b/components/metrics/test_enabled_state_provider.h
new file mode 100644
index 0000000..29fd7f1
--- /dev/null
+++ b/components/metrics/test_enabled_state_provider.h
@@ -0,0 +1,37 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_TEST_ENABLED_STATE_PROVIDER_H_
+#define COMPONENTS_METRICS_TEST_ENABLED_STATE_PROVIDER_H_
+
+#include "base/macros.h"
+#include "components/metrics/enabled_state_provider.h"
+
+namespace metrics {
+
+// A simple concrete implementation of the EnabledStateProvider interface, for
+// use in tests.
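+//
+// Illustrative test usage (sketch only):
+//   TestEnabledStateProvider provider(/*consent=*/true, /*enabled=*/false);
+//   provider.set_enabled(true);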
+class TestEnabledStateProvider : public EnabledStateProvider {
+ public:
+ TestEnabledStateProvider(bool consent, bool enabled)
+ : consent_(consent), enabled_(enabled) {}
+ ~TestEnabledStateProvider() override {}
+
+ // EnabledStateProvider
+ bool IsConsentGiven() const override;
+ bool IsReportingEnabled() const override;
+
+ void set_consent(bool consent) { consent_ = consent; }
+ void set_enabled(bool enabled) { enabled_ = enabled; }
+
+ private:
+ bool consent_;
+ bool enabled_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestEnabledStateProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_TEST_ENABLED_STATE_PROVIDER_H_
diff --git a/components/metrics/test_metrics_log_uploader.cc b/components/metrics/test_metrics_log_uploader.cc
new file mode 100644
index 0000000..acb49d3
--- /dev/null
+++ b/components/metrics/test_metrics_log_uploader.cc
@@ -0,0 +1,31 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/test_metrics_log_uploader.h"
+#include "components/metrics/metrics_log_uploader.h"
+
+namespace metrics {
+
+TestMetricsLogUploader::TestMetricsLogUploader(
+ const MetricsLogUploader::UploadCallback& on_upload_complete)
+ : on_upload_complete_(on_upload_complete), is_uploading_(false) {}
+
+TestMetricsLogUploader::~TestMetricsLogUploader() = default;
+
+void TestMetricsLogUploader::CompleteUpload(int response_code) {
+ DCHECK(is_uploading_);
+ is_uploading_ = false;
+ last_reporting_info_.Clear();
+ on_upload_complete_.Run(response_code, 0, false);
+}
+
+void TestMetricsLogUploader::UploadLog(const std::string& compressed_log_data,
+ const std::string& log_hash,
+ const ReportingInfo& reporting_info) {
+ DCHECK(!is_uploading_);
+ is_uploading_ = true;
+ last_reporting_info_ = reporting_info;
+}
+
+} // namespace metrics
diff --git a/components/metrics/test_metrics_log_uploader.h b/components/metrics/test_metrics_log_uploader.h
new file mode 100644
index 0000000..c8d724a
--- /dev/null
+++ b/components/metrics/test_metrics_log_uploader.h
@@ -0,0 +1,42 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_TEST_METRICS_LOG_UPLOADER_H_
+#define COMPONENTS_METRICS_TEST_METRICS_LOG_UPLOADER_H_
+
+#include "components/metrics/metrics_log_uploader.h"
+#include "third_party/metrics_proto/reporting_info.pb.h"
+
+namespace metrics {
+
+class TestMetricsLogUploader : public MetricsLogUploader {
+ public:
+ explicit TestMetricsLogUploader(
+ const MetricsLogUploader::UploadCallback& on_upload_complete);
+ ~TestMetricsLogUploader() override;
+
+ // Mark the current upload complete with the given response code.
+ void CompleteUpload(int response_code);
+
+ // Check if UploadLog has been called.
+ bool is_uploading() const { return is_uploading_; }
+
+ const ReportingInfo& reporting_info() const { return last_reporting_info_; }
+
+ private:
+ // MetricsLogUploader:
+ void UploadLog(const std::string& compressed_log_data,
+ const std::string& log_hash,
+ const ReportingInfo& reporting_info) override;
+
+ const MetricsLogUploader::UploadCallback on_upload_complete_;
+ ReportingInfo last_reporting_info_;
+ bool is_uploading_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestMetricsLogUploader);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_TEST_METRICS_LOG_UPLOADER_H_
diff --git a/components/metrics/test_metrics_provider.cc b/components/metrics/test_metrics_provider.cc
new file mode 100644
index 0000000..1c7bb5e
--- /dev/null
+++ b/components/metrics/test_metrics_provider.cc
@@ -0,0 +1,42 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/test_metrics_provider.h"
+
+#include "base/metrics/histogram_macros.h"
+
+namespace metrics {
+
+void TestMetricsProvider::Init() {
+ init_called_ = true;
+}
+
+void TestMetricsProvider::OnRecordingDisabled() {
+ on_recording_disabled_called_ = true;
+}
+
+bool TestMetricsProvider::HasPreviousSessionData() {
+ has_initial_stability_metrics_called_ = true;
+ return has_initial_stability_metrics_;
+}
+
+void TestMetricsProvider::ProvidePreviousSessionData(
+ ChromeUserMetricsExtension* uma_proto) {
+ UMA_STABILITY_HISTOGRAM_ENUMERATION("TestMetricsProvider.Initial", 1, 2);
+ provide_initial_stability_metrics_called_ = true;
+ ProvideCurrentSessionData(nullptr);
+}
+
+void TestMetricsProvider::ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) {
+ UMA_STABILITY_HISTOGRAM_ENUMERATION("TestMetricsProvider.Regular", 1, 2);
+ provide_stability_metrics_called_ = true;
+}
+
+void TestMetricsProvider::ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) {
+ provide_system_profile_metrics_called_ = true;
+}
+
+} // namespace metrics
diff --git a/components/metrics/test_metrics_provider.h b/components/metrics/test_metrics_provider.h
new file mode 100644
index 0000000..2578954
--- /dev/null
+++ b/components/metrics/test_metrics_provider.h
@@ -0,0 +1,69 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_TEST_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_TEST_METRICS_PROVIDER_H_
+
+#include "base/macros.h"
+#include "components/metrics/metrics_provider.h"
+
+namespace metrics {
+
+// A simple implementation of MetricsProvider that checks that its providing
+// functions are called, for use in tests.
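+//
+// Illustrative test usage (sketch only; the registration step depends on the
+// service under test):
+//   TestMetricsProvider* provider = new TestMetricsProvider();
+//   // ... hand ownership of |provider| to the service under test ...
+//   EXPECT_TRUE(provider->init_called());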
+class TestMetricsProvider : public MetricsProvider {
+ public:
+ TestMetricsProvider()
+ : init_called_(false),
+ on_recording_disabled_called_(false),
+ has_initial_stability_metrics_(false),
+ has_initial_stability_metrics_called_(false),
+ provide_initial_stability_metrics_called_(false),
+ provide_stability_metrics_called_(false),
+ provide_system_profile_metrics_called_(false) {}
+
+ // MetricsProvider:
+ void Init() override;
+ void OnRecordingDisabled() override;
+ bool HasPreviousSessionData() override;
+ void ProvidePreviousSessionData(
+ ChromeUserMetricsExtension* uma_proto) override;
+ void ProvideCurrentSessionData(
+ ChromeUserMetricsExtension* uma_proto) override;
+ void ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) override;
+
+ bool init_called() { return init_called_; }
+ bool on_recording_disabled_called() { return on_recording_disabled_called_; }
+ bool has_initial_stability_metrics_called() {
+ return has_initial_stability_metrics_called_;
+ }
+ void set_has_initial_stability_metrics(bool has_initial_stability_metrics) {
+ has_initial_stability_metrics_ = has_initial_stability_metrics;
+ }
+ bool provide_initial_stability_metrics_called() const {
+ return provide_initial_stability_metrics_called_;
+ }
+ bool provide_stability_metrics_called() const {
+ return provide_stability_metrics_called_;
+ }
+ bool provide_system_profile_metrics_called() const {
+ return provide_system_profile_metrics_called_;
+ }
+
+ private:
+ bool init_called_;
+ bool on_recording_disabled_called_;
+ bool has_initial_stability_metrics_;
+ bool has_initial_stability_metrics_called_;
+ bool provide_initial_stability_metrics_called_;
+ bool provide_stability_metrics_called_;
+ bool provide_system_profile_metrics_called_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_TEST_METRICS_PROVIDER_H_
diff --git a/components/metrics/test_metrics_service_client.cc b/components/metrics/test_metrics_service_client.cc
new file mode 100644
index 0000000..4859b4e
--- /dev/null
+++ b/components/metrics/test_metrics_service_client.cc
@@ -0,0 +1,89 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/test_metrics_service_client.h"
+
+#include <memory>
+
+#include "base/callback.h"
+#include "components/metrics/metrics_log_uploader.h"
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+
+namespace metrics {
+
+// static
+const char TestMetricsServiceClient::kBrandForTesting[] = "brand_for_testing";
+
+TestMetricsServiceClient::TestMetricsServiceClient()
+ : version_string_("5.0.322.0-64-devel"),
+ product_(ChromeUserMetricsExtension::CHROME),
+ reporting_is_managed_(false),
+ enable_default_(EnableMetricsDefault::DEFAULT_UNKNOWN) {}
+
+TestMetricsServiceClient::~TestMetricsServiceClient() {
+}
+
+metrics::MetricsService* TestMetricsServiceClient::GetMetricsService() {
+ return nullptr;
+}
+
+void TestMetricsServiceClient::SetMetricsClientId(
+ const std::string& client_id) {
+ client_id_ = client_id;
+}
+
+int32_t TestMetricsServiceClient::GetProduct() {
+ return product_;
+}
+
+std::string TestMetricsServiceClient::GetApplicationLocale() {
+ return "en-US";
+}
+
+bool TestMetricsServiceClient::GetBrand(std::string* brand_code) {
+ *brand_code = kBrandForTesting;
+ return true;
+}
+
+SystemProfileProto::Channel TestMetricsServiceClient::GetChannel() {
+ return SystemProfileProto::CHANNEL_BETA;
+}
+
+std::string TestMetricsServiceClient::GetVersionString() {
+ return version_string_;
+}
+
+void TestMetricsServiceClient::CollectFinalMetricsForLog(
+ const base::Closure& done_callback) {
+ done_callback.Run();
+}
+
+std::unique_ptr<MetricsLogUploader> TestMetricsServiceClient::CreateUploader(
+ base::StringPiece server_url,
+ base::StringPiece insecure_server_url,
+ base::StringPiece mime_type,
+ MetricsLogUploader::MetricServiceType service_type,
+ const MetricsLogUploader::UploadCallback& on_upload_complete) {
+ uploader_ = new TestMetricsLogUploader(on_upload_complete);
+ return std::unique_ptr<MetricsLogUploader>(uploader_);
+}
+
+base::TimeDelta TestMetricsServiceClient::GetStandardUploadInterval() {
+ return base::TimeDelta::FromMinutes(5);
+}
+
+bool TestMetricsServiceClient::IsReportingPolicyManaged() {
+ return reporting_is_managed_;
+}
+
+EnableMetricsDefault
+TestMetricsServiceClient::GetMetricsReportingDefaultState() {
+ return enable_default_;
+}
+
+std::string TestMetricsServiceClient::GetAppPackageName() {
+ return "test app";
+}
+
+} // namespace metrics
diff --git a/components/metrics/test_metrics_service_client.h b/components/metrics/test_metrics_service_client.h
new file mode 100644
index 0000000..c495536
--- /dev/null
+++ b/components/metrics/test_metrics_service_client.h
@@ -0,0 +1,75 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_TEST_METRICS_SERVICE_CLIENT_H_
+#define COMPONENTS_METRICS_TEST_METRICS_SERVICE_CLIENT_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/macros.h"
+#include "components/metrics/metrics_log_uploader.h"
+#include "components/metrics/metrics_service_client.h"
+#include "components/metrics/test_metrics_log_uploader.h"
+
+namespace metrics {
+
+// A simple concrete implementation of the MetricsServiceClient interface, for
+// use in tests.
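+//
+// Illustrative test usage (sketch only):
+//   TestMetricsServiceClient client;
+//   client.set_version_string("1.2.3.4");
+//   EXPECT_EQ("1.2.3.4", client.GetVersionString());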
+class TestMetricsServiceClient : public MetricsServiceClient {
+ public:
+ static const char kBrandForTesting[];
+
+ TestMetricsServiceClient();
+ ~TestMetricsServiceClient() override;
+
+ // MetricsServiceClient:
+ metrics::MetricsService* GetMetricsService() override;
+ void SetMetricsClientId(const std::string& client_id) override;
+ int32_t GetProduct() override;
+ std::string GetApplicationLocale() override;
+ bool GetBrand(std::string* brand_code) override;
+ SystemProfileProto::Channel GetChannel() override;
+ std::string GetVersionString() override;
+ void CollectFinalMetricsForLog(const base::Closure& done_callback) override;
+ std::unique_ptr<MetricsLogUploader> CreateUploader(
+ base::StringPiece server_url,
+ base::StringPiece insecure_server_url,
+ base::StringPiece mime_type,
+ MetricsLogUploader::MetricServiceType service_type,
+ const MetricsLogUploader::UploadCallback& on_upload_complete) override;
+ base::TimeDelta GetStandardUploadInterval() override;
+ bool IsReportingPolicyManaged() override;
+ EnableMetricsDefault GetMetricsReportingDefaultState() override;
+ std::string GetAppPackageName() override;
+
+ const std::string& get_client_id() const { return client_id_; }
+ // Returns a weak ref to the last created uploader.
+ TestMetricsLogUploader* uploader() { return uploader_; }
+ void set_version_string(const std::string& str) { version_string_ = str; }
+ void set_product(int32_t product) { product_ = product; }
+ void set_reporting_is_managed(bool managed) {
+ reporting_is_managed_ = managed;
+ }
+ void set_enable_default(EnableMetricsDefault enable_default) {
+ enable_default_ = enable_default;
+ }
+
+ private:
+ std::string client_id_;
+ std::string version_string_;
+ int32_t product_;
+ bool reporting_is_managed_;
+ EnableMetricsDefault enable_default_;
+
+ // A weak ref to the last created TestMetricsLogUploader.
+ TestMetricsLogUploader* uploader_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestMetricsServiceClient);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_TEST_METRICS_SERVICE_CLIENT_H_
diff --git a/components/metrics/ui/DEPS b/components/metrics/ui/DEPS
new file mode 100644
index 0000000..e141313
--- /dev/null
+++ b/components/metrics/ui/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+ui/display",
+ "+ui/gfx",
+]
diff --git a/components/metrics/ui/screen_info_metrics_provider.cc b/components/metrics/ui/screen_info_metrics_provider.cc
new file mode 100644
index 0000000..f35639a
--- /dev/null
+++ b/components/metrics/ui/screen_info_metrics_provider.cc
@@ -0,0 +1,97 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/ui/screen_info_metrics_provider.h"
+
+#include "build/build_config.h"
+#include "third_party/metrics_proto/system_profile.pb.h"
+#include "ui/display/display.h"
+#include "ui/display/screen.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace metrics {
+
+#if defined(OS_WIN)
+
+namespace {
+
+struct ScreenDPIInformation {
+ double max_dpi_x;
+ double max_dpi_y;
+};
+
+// Called once for each connected monitor.
+BOOL CALLBACK GetMonitorDPICallback(HMONITOR, HDC hdc, LPRECT, LPARAM dwData) {
+ const double kMillimetersPerInch = 25.4;
+ ScreenDPIInformation* screen_info =
+ reinterpret_cast<ScreenDPIInformation*>(dwData);
+ // Size of screen, in mm.
+ DWORD size_x = GetDeviceCaps(hdc, HORZSIZE);
+ DWORD size_y = GetDeviceCaps(hdc, VERTSIZE);
+ double dpi_x = (size_x > 0) ?
+ GetDeviceCaps(hdc, HORZRES) / (size_x / kMillimetersPerInch) : 0;
+ double dpi_y = (size_y > 0) ?
+ GetDeviceCaps(hdc, VERTRES) / (size_y / kMillimetersPerInch) : 0;
+ screen_info->max_dpi_x = std::max(dpi_x, screen_info->max_dpi_x);
+ screen_info->max_dpi_y = std::max(dpi_y, screen_info->max_dpi_y);
+ return TRUE;
+}
+
+void WriteScreenDPIInformationProto(SystemProfileProto::Hardware* hardware) {
+ HDC desktop_dc = GetDC(nullptr);
+ if (desktop_dc) {
+ ScreenDPIInformation si = {0, 0};
+ if (EnumDisplayMonitors(desktop_dc, nullptr, GetMonitorDPICallback,
+ reinterpret_cast<LPARAM>(&si))) {
+ hardware->set_max_dpi_x(si.max_dpi_x);
+ hardware->set_max_dpi_y(si.max_dpi_y);
+ }
+ ReleaseDC(GetDesktopWindow(), desktop_dc);
+ }
+}
+
+} // namespace
+
+#endif // defined(OS_WIN)
+
+ScreenInfoMetricsProvider::ScreenInfoMetricsProvider() {
+}
+
+ScreenInfoMetricsProvider::~ScreenInfoMetricsProvider() {
+}
+
+void ScreenInfoMetricsProvider::ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) {
+ SystemProfileProto::Hardware* hardware =
+ system_profile_proto->mutable_hardware();
+
+ const gfx::Size display_size = GetScreenSize();
+ hardware->set_primary_screen_width(display_size.width());
+ hardware->set_primary_screen_height(display_size.height());
+ hardware->set_primary_screen_scale_factor(GetScreenDeviceScaleFactor());
+ hardware->set_screen_count(GetScreenCount());
+
+#if defined(OS_WIN)
+ WriteScreenDPIInformationProto(hardware);
+#endif
+}
+
+gfx::Size ScreenInfoMetricsProvider::GetScreenSize() const {
+ return display::Screen::GetScreen()->GetPrimaryDisplay().GetSizeInPixel();
+}
+
+float ScreenInfoMetricsProvider::GetScreenDeviceScaleFactor() const {
+ return display::Screen::GetScreen()
+ ->GetPrimaryDisplay()
+ .device_scale_factor();
+}
+
+int ScreenInfoMetricsProvider::GetScreenCount() const {
+ return display::Screen::GetScreen()->GetNumDisplays();
+}
+
+} // namespace metrics
diff --git a/components/metrics/ui/screen_info_metrics_provider.h b/components/metrics/ui/screen_info_metrics_provider.h
new file mode 100644
index 0000000..51ef2b7
--- /dev/null
+++ b/components/metrics/ui/screen_info_metrics_provider.h
@@ -0,0 +1,42 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_UI_SCREEN_INFO_METRICS_PROVIDER_H_
+#define COMPONENTS_METRICS_UI_SCREEN_INFO_METRICS_PROVIDER_H_
+
+#include "base/macros.h"
+#include "components/metrics/metrics_provider.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace metrics {
+
+// ScreenInfoMetricsProvider provides metrics related to screen info.
+class ScreenInfoMetricsProvider : public MetricsProvider {
+ public:
+ ScreenInfoMetricsProvider();
+ ~ScreenInfoMetricsProvider() override;
+
+ // MetricsProvider:
+ void ProvideSystemProfileMetrics(
+ SystemProfileProto* system_profile_proto) override;
+
+ protected:
+ // Exposed for the sake of mocking in test code.
+
+ // Returns the screen size for the primary monitor.
+ virtual gfx::Size GetScreenSize() const;
+
+ // Returns the device scale factor for the primary monitor.
+ virtual float GetScreenDeviceScaleFactor() const;
+
+ // Returns the number of monitors the user is using.
+ virtual int GetScreenCount() const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScreenInfoMetricsProvider);
+};
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_UI_SCREEN_INFO_METRICS_PROVIDER_H_
diff --git a/components/metrics/ui/screen_info_metrics_provider_unittest.cc b/components/metrics/ui/screen_info_metrics_provider_unittest.cc
new file mode 100644
index 0000000..836e977
--- /dev/null
+++ b/components/metrics/ui/screen_info_metrics_provider_unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/ui/screen_info_metrics_provider.h"
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/chrome_user_metrics_extension.pb.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace metrics {
+
+namespace {
+
+const int kScreenWidth = 1024;
+const int kScreenHeight = 768;
+const int kScreenCount = 3;
+const float kScreenScaleFactor = 2;
+
+class TestScreenInfoMetricsProvider : public ScreenInfoMetricsProvider {
+ public:
+ TestScreenInfoMetricsProvider() {}
+ ~TestScreenInfoMetricsProvider() override {}
+
+ private:
+ gfx::Size GetScreenSize() const override {
+ return gfx::Size(kScreenWidth, kScreenHeight);
+ }
+
+ float GetScreenDeviceScaleFactor() const override {
+ return kScreenScaleFactor;
+ }
+
+ int GetScreenCount() const override { return kScreenCount; }
+
+ DISALLOW_COPY_AND_ASSIGN(TestScreenInfoMetricsProvider);
+};
+
+} // namespace
+
+class ScreenInfoMetricsProviderTest : public testing::Test {
+ public:
+ ScreenInfoMetricsProviderTest() {}
+ ~ScreenInfoMetricsProviderTest() override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScreenInfoMetricsProviderTest);
+};
+
+TEST_F(ScreenInfoMetricsProviderTest, ProvideSystemProfileMetrics) {
+ TestScreenInfoMetricsProvider provider;
+ ChromeUserMetricsExtension uma_proto;
+
+ provider.ProvideSystemProfileMetrics(uma_proto.mutable_system_profile());
+
+ // Check that the system profile has the correct values set.
+ const SystemProfileProto::Hardware& hardware =
+ uma_proto.system_profile().hardware();
+ EXPECT_EQ(kScreenWidth, hardware.primary_screen_width());
+ EXPECT_EQ(kScreenHeight, hardware.primary_screen_height());
+ EXPECT_EQ(kScreenScaleFactor, hardware.primary_screen_scale_factor());
+ EXPECT_EQ(kScreenCount, hardware.screen_count());
+}
+
+} // namespace metrics
diff --git a/components/metrics/url_constants.cc b/components/metrics/url_constants.cc
new file mode 100644
index 0000000..f60d33b
--- /dev/null
+++ b/components/metrics/url_constants.cc
@@ -0,0 +1,19 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/url_constants.h"
+
+namespace metrics {
+
+const char kNewMetricsServerUrl[] =
+ "https://clientservices.googleapis.com/uma/v2";
+
+const char kNewMetricsServerUrlInsecure[] =
+ "http://clientservices.googleapis.com/uma/v2";
+
+const char kOldMetricsServerUrl[] = "https://clients4.google.com/uma/v2";
+
+const char kDefaultMetricsMimeType[] = "application/vnd.chrome.uma";
+
+} // namespace metrics
diff --git a/components/metrics/url_constants.h b/components/metrics/url_constants.h
new file mode 100644
index 0000000..0f4c58c
--- /dev/null
+++ b/components/metrics/url_constants.h
@@ -0,0 +1,24 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_URL_CONSTANTS_H_
+#define COMPONENTS_METRICS_URL_CONSTANTS_H_
+
+namespace metrics {
+
+// The new metrics server's URL.
+extern const char kNewMetricsServerUrl[];
+
+// The HTTP fallback metrics server's URL.
+extern const char kNewMetricsServerUrlInsecure[];
+
+// The old metrics server's URL.
+extern const char kOldMetricsServerUrl[];
+
+// The default MIME type for the uploaded metrics data.
+extern const char kDefaultMetricsMimeType[];
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_URL_CONSTANTS_H_
diff --git a/components/metrics/version_utils.cc b/components/metrics/version_utils.cc
new file mode 100644
index 0000000..d24932b
--- /dev/null
+++ b/components/metrics/version_utils.cc
@@ -0,0 +1,40 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/version_utils.h"
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "components/version_info/version_info.h"
+
+namespace metrics {
+
+std::string GetVersionString() {
+ std::string version = version_info::GetVersionNumber();
+#if defined(ARCH_CPU_64_BITS)
+ version += "-64";
+#endif // defined(ARCH_CPU_64_BITS)
+ if (!version_info::IsOfficialBuild())
+ version.append("-devel");
+ return version;
+}
+
+SystemProfileProto::Channel AsProtobufChannel(version_info::Channel channel) {
+ switch (channel) {
+ case version_info::Channel::UNKNOWN:
+ return SystemProfileProto::CHANNEL_UNKNOWN;
+ case version_info::Channel::CANARY:
+ return SystemProfileProto::CHANNEL_CANARY;
+ case version_info::Channel::DEV:
+ return SystemProfileProto::CHANNEL_DEV;
+ case version_info::Channel::BETA:
+ return SystemProfileProto::CHANNEL_BETA;
+ case version_info::Channel::STABLE:
+ return SystemProfileProto::CHANNEL_STABLE;
+ }
+ NOTREACHED();
+ return SystemProfileProto::CHANNEL_UNKNOWN;
+}
+
+} // namespace metrics
diff --git a/components/metrics/version_utils.h b/components/metrics/version_utils.h
new file mode 100644
index 0000000..387d664
--- /dev/null
+++ b/components/metrics/version_utils.h
@@ -0,0 +1,28 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_VERSION_UTILS_H_
+#define COMPONENTS_METRICS_VERSION_UTILS_H_
+
+#include <string>
+
+#include "third_party/metrics_proto/system_profile.pb.h"
+
+namespace version_info {
+enum class Channel;
+}
+
+namespace metrics {
+
+// Build a string including the Chrome app version, suffixed by "-64" on 64-bit
+// platforms, and "-devel" on developer builds.
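+// For example, a 64-bit developer build might report a value such as
+// "70.0.3538.0-64-devel" (illustrative value only).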
+std::string GetVersionString();
+
+// Translates version_info::Channel to the equivalent
+// SystemProfileProto::Channel.
+SystemProfileProto::Channel AsProtobufChannel(version_info::Channel channel);
+
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_VERSION_UTILS_H_
diff --git a/components/metrics_services_manager/BUILD.gn b/components/metrics_services_manager/BUILD.gn
new file mode 100644
index 0000000..7be3654
--- /dev/null
+++ b/components/metrics_services_manager/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+static_library("metrics_services_manager") {
+ sources = [
+ "metrics_services_manager.cc",
+ "metrics_services_manager.h",
+ "metrics_services_manager_client.cc",
+ "metrics_services_manager_client.h",
+ ]
+
+ deps = [
+ "//base",
+ "//components/metrics",
+ "//components/rappor",
+ "//components/ukm",
+ "//components/variations",
+ "//components/variations/service",
+ "//services/network/public/cpp:cpp",
+ ]
+}
diff --git a/components/metrics_services_manager/DEPS b/components/metrics_services_manager/DEPS
new file mode 100644
index 0000000..d90fc81
--- /dev/null
+++ b/components/metrics_services_manager/DEPS
@@ -0,0 +1,9 @@
+include_rules = [
+ "-components",
+ "+components/metrics",
+ "+components/metrics_services_manager",
+ "+components/rappor",
+ "+components/ukm",
+ "+components/variations",
+ "+services/network",
+]
diff --git a/components/metrics_services_manager/METADATA b/components/metrics_services_manager/METADATA
new file mode 100644
index 0000000..f64a0e6
--- /dev/null
+++ b/components/metrics_services_manager/METADATA
@@ -0,0 +1,21 @@
+name: "metrics_services_manager"
+description:
+ "Filtered subtree at components/metrics_services_manager."
+
+third_party {
+ url {
+ type: LOCAL_SOURCE
+ value: "https://cobalt.googlesource.com/components/metrics_services_manager_filtered_mirror"
+ }
+ url {
+ type: GIT
+ value: "https://github.com/chromium/chromium"
+ }
+ # Closest commit hash to m70.
+ version: "c413b7aea586859accffb96cd77b2b5078ff6607"
+ last_upgrade_date {
+ year: 2018
+ month: 05
+ day: 22
+ }
+}
diff --git a/components/metrics_services_manager/OWNERS b/components/metrics_services_manager/OWNERS
new file mode 100644
index 0000000..c929027
--- /dev/null
+++ b/components/metrics_services_manager/OWNERS
@@ -0,0 +1,3 @@
+file://base/metrics/OWNERS
+
+# COMPONENT: Internals>Metrics
diff --git a/components/metrics_services_manager/metrics_services_manager.cc b/components/metrics_services_manager/metrics_services_manager.cc
new file mode 100644
index 0000000..4952631
--- /dev/null
+++ b/components/metrics_services_manager/metrics_services_manager.cc
@@ -0,0 +1,177 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics_services_manager/metrics_services_manager.h"
+
+#include <utility>
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "components/metrics/metrics_service.h"
+#include "components/metrics/metrics_service_client.h"
+#include "components/metrics/metrics_state_manager.h"
+#include "components/metrics/metrics_switches.h"
+#include "components/metrics_services_manager/metrics_services_manager_client.h"
+#include "components/rappor/rappor_service_impl.h"
+#include "components/ukm/ukm_service.h"
+#include "components/variations/service/variations_service.h"
+#include "services/network/public/cpp/shared_url_loader_factory.h"
+
+namespace metrics_services_manager {
+
+MetricsServicesManager::MetricsServicesManager(
+ std::unique_ptr<MetricsServicesManagerClient> client)
+ : client_(std::move(client)),
+ may_upload_(false),
+ may_record_(false),
+ consent_given_(false) {
+ DCHECK(client_);
+}
+
+MetricsServicesManager::~MetricsServicesManager() {}
+
+std::unique_ptr<const base::FieldTrial::EntropyProvider>
+MetricsServicesManager::CreateEntropyProvider() {
+ return client_->CreateEntropyProvider();
+}
+
+metrics::MetricsService* MetricsServicesManager::GetMetricsService() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return GetMetricsServiceClient()->GetMetricsService();
+}
+
+rappor::RapporServiceImpl* MetricsServicesManager::GetRapporServiceImpl() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!rappor_service_) {
+ rappor_service_ = client_->CreateRapporServiceImpl();
+ rappor_service_->Initialize(client_->GetURLLoaderFactory());
+ }
+ return rappor_service_.get();
+}
+
+ukm::UkmService* MetricsServicesManager::GetUkmService() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return GetMetricsServiceClient()->GetUkmService();
+}
+
+variations::VariationsService* MetricsServicesManager::GetVariationsService() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!variations_service_)
+ variations_service_ = client_->CreateVariationsService();
+ return variations_service_.get();
+}
+
+void MetricsServicesManager::OnPluginLoadingError(
+ const base::FilePath& plugin_path) {
+ GetMetricsServiceClient()->OnPluginLoadingError(plugin_path);
+}
+
+void MetricsServicesManager::OnRendererProcessCrash() {
+ GetMetricsServiceClient()->OnRendererProcessCrash();
+}
+
+metrics::MetricsServiceClient*
+MetricsServicesManager::GetMetricsServiceClient() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!metrics_service_client_) {
+ metrics_service_client_ = client_->CreateMetricsServiceClient();
+ // base::Unretained is safe since |this| owns the metrics_service_client_.
+ metrics_service_client_->SetUpdateRunningServicesCallback(
+ base::Bind(&MetricsServicesManager::UpdateRunningServices,
+ base::Unretained(this)));
+ }
+ return metrics_service_client_.get();
+}
+
+void MetricsServicesManager::UpdatePermissions(bool current_may_record,
+ bool current_consent_given,
+ bool current_may_upload) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+  // If the user has opted out of metrics, delete local UKM state. We only
+  // check consent for UKM.
+ if (consent_given_ && !current_consent_given) {
+ ukm::UkmService* ukm = GetUkmService();
+ if (ukm) {
+ ukm->Purge();
+ ukm->ResetClientId();
+ }
+ }
+
+ // Stash the current permissions so that we can update the RapporServiceImpl
+ // correctly when the Rappor preference changes.
+ may_record_ = current_may_record;
+ consent_given_ = current_consent_given;
+ may_upload_ = current_may_upload;
+ UpdateRunningServices();
+}
+
+void MetricsServicesManager::UpdateRunningServices() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ metrics::MetricsService* metrics = GetMetricsService();
+
+ const base::CommandLine* cmdline = base::CommandLine::ForCurrentProcess();
+ if (cmdline->HasSwitch(metrics::switches::kMetricsRecordingOnly)) {
+ metrics->StartRecordingForTests();
+ GetRapporServiceImpl()->Update(true, false);
+ return;
+ }
+
+ client_->UpdateRunningServices(may_record_, may_upload_);
+
+ if (may_record_) {
+ if (!metrics->recording_active())
+ metrics->Start();
+ if (may_upload_)
+ metrics->EnableReporting();
+ else
+ metrics->DisableReporting();
+ } else {
+ metrics->Stop();
+ }
+
+ UpdateUkmService();
+
+ GetRapporServiceImpl()->Update(may_record_, may_upload_);
+}
+
+void MetricsServicesManager::UpdateUkmService() {
+ ukm::UkmService* ukm = GetUkmService();
+ if (!ukm)
+ return;
+
+ bool listeners_active =
+ GetMetricsServiceClient()->AreNotificationListenersEnabledOnAllProfiles();
+ bool sync_enabled = client_->IsMetricsReportingForceEnabled() ||
+ metrics_service_client_->SyncStateAllowsUkm();
+ bool is_incognito = client_->IsIncognitoSessionActive();
+
+ if (consent_given_ && listeners_active && sync_enabled && !is_incognito) {
+ // TODO(skare): revise this - merged in a big change
+ ukm->EnableRecording(
+ metrics_service_client_->SyncStateAllowsExtensionUkm());
+ if (may_upload_)
+ ukm->EnableReporting();
+ else
+ ukm->DisableReporting();
+ } else {
+ ukm->DisableRecording();
+ ukm->DisableReporting();
+ }
+}
+
+void MetricsServicesManager::UpdateUploadPermissions(bool may_upload) {
+ if (client_->IsMetricsReportingForceEnabled()) {
+ UpdatePermissions(true, true, true);
+ return;
+ }
+
+ UpdatePermissions(client_->IsMetricsReportingEnabled(),
+ client_->IsMetricsConsentGiven(), may_upload);
+}
+
+bool MetricsServicesManager::IsMetricsReportingEnabled() const {
+ return client_->IsMetricsReportingEnabled();
+}
+
+} // namespace metrics_services_manager
diff --git a/components/metrics_services_manager/metrics_services_manager.h b/components/metrics_services_manager/metrics_services_manager.h
new file mode 100644
index 0000000..912bf9a
--- /dev/null
+++ b/components/metrics_services_manager/metrics_services_manager.h
@@ -0,0 +1,138 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_SERVICES_MANAGER_METRICS_SERVICES_MANAGER_H_
+#define COMPONENTS_METRICS_SERVICES_MANAGER_METRICS_SERVICES_MANAGER_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/metrics/field_trial.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+class FilePath;
+}
+
+namespace metrics {
+class MetricsService;
+class MetricsServiceClient;
+class MetricsStateManager;
+}
+
+namespace rappor {
+class RapporServiceImpl;
+}
+
+namespace ukm {
+class UkmService;
+}
+
+namespace variations {
+class VariationsService;
+}
+
+namespace metrics_services_manager {
+
+class MetricsServicesManagerClient;
+
+// MetricsServicesManager is a helper class for embedders that use the various
+// metrics-related services in a Chrome-like fashion: MetricsService (via its
+// client), RapporServiceImpl and VariationsService.
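+//
+// Illustrative embedder usage (sketch only; MyManagerClient stands in for a
+// hypothetical MetricsServicesManagerClient implementation):
+//   auto manager = std::make_unique<MetricsServicesManager>(
+//       std::make_unique<MyManagerClient>());
+//   manager->UpdateUploadPermissions(/*may_upload=*/true);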
+class MetricsServicesManager {
+ public:
+ // Creates the MetricsServicesManager with the given client.
+ explicit MetricsServicesManager(
+ std::unique_ptr<MetricsServicesManagerClient> client);
+ virtual ~MetricsServicesManager();
+
+ // Returns the preferred entropy provider used to seed persistent activities
+ // based on whether or not metrics reporting is permitted on this client.
+ //
+ // If there's consent to report metrics, this method returns an entropy
+ // provider that has a high source of entropy, partially based on the client
+ // ID. Otherwise, it returns an entropy provider that is based on a low
+ // entropy source.
+ std::unique_ptr<const base::FieldTrial::EntropyProvider>
+ CreateEntropyProvider();
+
+ // Returns the MetricsService, creating it if it hasn't been created yet (and
+ // additionally creating the MetricsServiceClient in that case).
+ metrics::MetricsService* GetMetricsService();
+
+ // Returns the RapporServiceImpl, creating it if it hasn't been created yet.
+ rappor::RapporServiceImpl* GetRapporServiceImpl();
+
+ // Returns the UkmService, creating it if it hasn't been created yet.
+ ukm::UkmService* GetUkmService();
+
+ // Returns the VariationsService, creating it if it hasn't been created yet.
+ variations::VariationsService* GetVariationsService();
+
+ // Should be called when a plugin loading error occurs.
+ void OnPluginLoadingError(const base::FilePath& plugin_path);
+
+ // Some embedders use this method to notify the metrics system when a
+ // renderer process exits unexpectedly.
+ void OnRendererProcessCrash();
+
+ // Update the managed services when permissions for uploading metrics change.
+ void UpdateUploadPermissions(bool may_upload);
+
+ // Gets the current state of metric reporting.
+ bool IsMetricsReportingEnabled() const;
+
+ private:
+ // Update the managed services when permissions for recording/uploading
+ // metrics change.
+ void UpdateRapporServiceImpl();
+
+ // Returns the MetricsServiceClient, creating it if it hasn't been
+ // created yet (and additionally creating the MetricsService in that case).
+ metrics::MetricsServiceClient* GetMetricsServiceClient();
+
+ metrics::MetricsStateManager* GetMetricsStateManager();
+
+ // Update which services are running to match current permissions.
+ void UpdateRunningServices();
+
+ // Update the state of UkmService to match current permissions.
+ void UpdateUkmService();
+
+ // Update the managed services when permissions for recording/uploading
+ // metrics change.
+ void UpdatePermissions(bool current_may_record,
+ bool current_consent_given,
+ bool current_may_upload);
+
+ // The client passed in from the embedder.
+ const std::unique_ptr<MetricsServicesManagerClient> client_;
+
+ // Ensures that all functions are called from the same thread.
+ base::ThreadChecker thread_checker_;
+
+ // The current metrics reporting setting.
+ bool may_upload_;
+
+ // The current metrics recording setting.
+ bool may_record_;
+
+ // The current metrics setting reflecting if consent was given.
+ bool consent_given_;
+
+ // The MetricsServiceClient. Owns the MetricsService.
+ std::unique_ptr<metrics::MetricsServiceClient> metrics_service_client_;
+
+ // The RapporServiceImpl, for RAPPOR metric uploads.
+ std::unique_ptr<rappor::RapporServiceImpl> rappor_service_;
+
+ // The VariationsService, for server-side experiments infrastructure.
+ std::unique_ptr<variations::VariationsService> variations_service_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsServicesManager);
+};
+
+} // namespace metrics_services_manager
+
+#endif // COMPONENTS_METRICS_SERVICES_MANAGER_METRICS_SERVICES_MANAGER_H_
diff --git a/components/metrics_services_manager/metrics_services_manager_client.cc b/components/metrics_services_manager/metrics_services_manager_client.cc
new file mode 100644
index 0000000..05c6869
--- /dev/null
+++ b/components/metrics_services_manager/metrics_services_manager_client.cc
@@ -0,0 +1,13 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics_services_manager/metrics_services_manager_client.h"
+
+namespace metrics_services_manager {
+
+bool MetricsServicesManagerClient::IsMetricsReportingForceEnabled() {
+ return false;
+}
+
+} // namespace metrics_services_manager
diff --git a/components/metrics_services_manager/metrics_services_manager_client.h b/components/metrics_services_manager/metrics_services_manager_client.h
new file mode 100644
index 0000000..138f1e0
--- /dev/null
+++ b/components/metrics_services_manager/metrics_services_manager_client.h
@@ -0,0 +1,70 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_SERVICES_MANAGER_METRICS_SERVICES_MANAGER_CLIENT_H_
+#define COMPONENTS_METRICS_SERVICES_MANAGER_METRICS_SERVICES_MANAGER_CLIENT_H_
+
+#include <memory>
+
+#include "base/callback_forward.h"
+#include "base/metrics/field_trial.h"
+
+namespace metrics {
+class MetricsServiceClient;
+}
+
+namespace network {
+class SharedURLLoaderFactory;
+}
+
+namespace rappor {
+class RapporServiceImpl;
+}
+
+namespace variations {
+class VariationsService;
+}
+
+namespace metrics_services_manager {
+
+// MetricsServicesManagerClient is an interface that allows
+// MetricsServicesManager to interact with its embedder.
+class MetricsServicesManagerClient {
+ public:
+ virtual ~MetricsServicesManagerClient() {}
+
+ // Methods that create the various services in the context of the embedder.
+ virtual std::unique_ptr<rappor::RapporServiceImpl>
+ CreateRapporServiceImpl() = 0;
+ virtual std::unique_ptr<variations::VariationsService>
+ CreateVariationsService() = 0;
+ virtual std::unique_ptr<metrics::MetricsServiceClient>
+ CreateMetricsServiceClient() = 0;
+ virtual std::unique_ptr<const base::FieldTrial::EntropyProvider>
+ CreateEntropyProvider() = 0;
+
+ // Returns the URL loader factory which the metrics services should use.
+ virtual scoped_refptr<network::SharedURLLoaderFactory>
+ GetURLLoaderFactory() = 0;
+
+ // Returns whether metrics reporting is enabled.
+ virtual bool IsMetricsReportingEnabled() = 0;
+
+ // Returns whether metrics consent is given.
+ virtual bool IsMetricsConsentGiven() = 0;
+
+ // Returns whether there are any Incognito browsers/tabs open.
+ virtual bool IsIncognitoSessionActive() = 0;
+
+ // Update the running state of metrics services managed by the embedder, for
+ // example, crash reporting.
+ virtual void UpdateRunningServices(bool may_record, bool may_upload) {}
+
+ // Returns whether the user forced metrics collection on via the override flag.
+ virtual bool IsMetricsReportingForceEnabled();
+};
+
+} // namespace metrics_services_manager
+
+#endif // COMPONENTS_METRICS_SERVICES_MANAGER_METRICS_SERVICES_MANAGER_CLIENT_H_
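
A minimal sketch of an embedder-side client, assuming everything is stubbed out (the class name is hypothetical and a real implementation would return fully constructed services rather than nullptr):

  #include "components/metrics_services_manager/metrics_services_manager_client.h"

  class ShellMetricsServicesManagerClient
      : public metrics_services_manager::MetricsServicesManagerClient {
   public:
    std::unique_ptr<rappor::RapporServiceImpl> CreateRapporServiceImpl()
        override {
      return nullptr;  // RAPPOR is not wired up in this sketch.
    }
    std::unique_ptr<variations::VariationsService> CreateVariationsService()
        override {
      return nullptr;
    }
    std::unique_ptr<metrics::MetricsServiceClient> CreateMetricsServiceClient()
        override {
      return nullptr;
    }
    std::unique_ptr<const base::FieldTrial::EntropyProvider>
    CreateEntropyProvider() override {
      return nullptr;
    }
    scoped_refptr<network::SharedURLLoaderFactory> GetURLLoaderFactory()
        override {
      return nullptr;
    }
    bool IsMetricsReportingEnabled() override { return false; }
    bool IsMetricsConsentGiven() override { return false; }
    bool IsIncognitoSessionActive() override { return false; }
  };
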
diff --git a/components/prefs/json_pref_store.cc b/components/prefs/json_pref_store.cc
index 7d8d189..d0d7481 100644
--- a/components/prefs/json_pref_store.cc
+++ b/components/prefs/json_pref_store.cc
@@ -206,9 +206,13 @@
base::Value* old_value = nullptr;
prefs_->Get(key, &old_value);
if (!old_value || !value->Equals(old_value)) {
+#if defined(STARBOARD)
// Value::DictionaryValue::Set creates a nested dictionary treating a URL
// key as a path, SetKey avoids this.
- prefs_->SetKey(key, std::move(*value.get()));
+ prefs_->SetKey(key, base::Value::FromUniquePtrValue(std::move(value)));
+#else
+ prefs_->Set(key, std::move(value));
+#endif
ReportValueChanged(key, flags);
}
}
@@ -222,7 +226,11 @@
base::Value* old_value = nullptr;
prefs_->Get(key, &old_value);
if (!old_value || !value->Equals(old_value)) {
- prefs_->SetPath({key}, base::Value::FromUniquePtrValue(std::move(value)));
+#if defined(STARBOARD)
+ prefs_->SetKey(key, base::Value::FromUniquePtrValue(std::move(value)));
+#else
+ prefs_->Set(key, std::move(value));
+#endif
ScheduleWrite(flags);
}
}
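
The STARBOARD branch matters because base::DictionaryValue::Set() treats '.' in the key as a path separator, which mangles URL-like pref keys, while SetKey() stores the key verbatim. A small standalone illustration (not part of the patch):

  #include <memory>
  #include "base/values.h"

  base::DictionaryValue dict;
  // Set() splits on '.', so a URL key becomes a nested dictionary:
  dict.Set("https://example.com", std::make_unique<base::Value>(1));
  // -> {"https://example": {"com": 1}}
  // SetKey() stores the key literally:
  dict.SetKey("https://example.com", base::Value(1));
  // -> {"https://example.com": 1}
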
diff --git a/components/ukm/BUILD.gn b/components/ukm/BUILD.gn
new file mode 100644
index 0000000..6bb8424
--- /dev/null
+++ b/components/ukm/BUILD.gn
@@ -0,0 +1,118 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//testing/test.gni")
+
+# The Url-Keyed Metrics (UKM) service is responsible for gathering and
+# uploading reports that contain fine grained performance metrics including
+# URLs for top-level navigations.
+static_library("ukm") {
+ sources = [
+ "persisted_logs_metrics_impl.cc",
+ "persisted_logs_metrics_impl.h",
+ "ukm_pref_names.cc",
+ "ukm_pref_names.h",
+ "ukm_recorder_impl.cc",
+ "ukm_recorder_impl.h",
+ "ukm_reporting_service.cc",
+ "ukm_reporting_service.h",
+ "ukm_rotation_scheduler.cc",
+ "ukm_rotation_scheduler.h",
+ "ukm_service.cc",
+ "ukm_service.h",
+ ]
+
+ public_deps = [
+ "//services/metrics/public/cpp:metrics_cpp",
+ "//services/metrics/public/cpp:ukm_builders",
+ "//services/metrics/public/mojom",
+ "//third_party/metrics_proto",
+ ]
+
+ deps = [
+ "//base",
+ "//components/data_use_measurement/core",
+ "//components/metrics",
+ "//components/prefs",
+ "//components/variations",
+ "//url",
+ ]
+}
+
+# Helper library for observing signals that indicate local data needs to be cleared.
+static_library("observers") {
+ sources = [
+ "observers/history_delete_observer.cc",
+ "observers/history_delete_observer.h",
+ "observers/sync_disable_observer.cc",
+ "observers/sync_disable_observer.h",
+ ]
+
+ deps = [
+ "//base",
+ "//components/history/core/browser",
+ "//components/sync",
+ ]
+
+ public_deps = [
+ "//components/unified_consent",
+ ]
+}
+
+static_library("test_support") {
+ testonly = true
+ sources = [
+ "test_ukm_recorder.cc",
+ "test_ukm_recorder.h",
+ ]
+
+ public_deps = [
+ ":ukm",
+ "//third_party/metrics_proto",
+ ]
+ deps = [
+ "//base",
+ "//components/metrics:test_support",
+ "//components/prefs:test_support",
+ "//testing/gtest:gtest",
+ ]
+}
+
+source_set("unit_tests") {
+ testonly = true
+ sources = [
+ "observers/sync_disable_observer_unittest.cc",
+ "ukm_service_unittest.cc",
+ ]
+
+ deps = [
+ ":observers",
+ ":test_support",
+ ":ukm",
+ "//base",
+ "//base/test:test_support",
+ "//components/metrics",
+ "//components/metrics:test_support",
+ "//components/prefs:test_support",
+ "//components/sync",
+ "//components/sync:test_support_driver",
+ "//components/sync_preferences:test_support",
+ "//components/variations",
+ "//net:test_support",
+ "//services/metrics/public/cpp:ukm_builders",
+ "//testing/gtest",
+ "//third_party/zlib/google:compression_utils",
+ "//url",
+ ]
+}
+
+# Convenience testing target
+test("ukm_unittests") {
+ deps = [
+ ":unit_tests",
+ "//base",
+ "//base/test:test_support",
+ "//components/test:run_all_unittests",
+ ]
+}
diff --git a/components/ukm/DEPS b/components/ukm/DEPS
new file mode 100644
index 0000000..053cb84
--- /dev/null
+++ b/components/ukm/DEPS
@@ -0,0 +1,16 @@
+include_rules = [
+ "+components/metrics",
+ "+components/prefs",
+ "+components/unified_consent",
+ "+components/variations",
+ "+mojo/public",
+ "+services/metrics/public",
+ "+third_party/metrics_proto",
+ "+third_party/zlib/google",
+]
+
+specific_include_rules = {
+ ".*unittest\.cc": [
+ "+components/sync_preferences/testing_pref_service_syncable.h",
+ ]
+}
\ No newline at end of file
diff --git a/components/ukm/METADATA b/components/ukm/METADATA
new file mode 100644
index 0000000..7947c4b
--- /dev/null
+++ b/components/ukm/METADATA
@@ -0,0 +1,21 @@
+name: "ukm"
+description:
+ "Filtered subtree at components/ukm."
+
+third_party {
+ url {
+ type: LOCAL_SOURCE
+ value: "https://cobalt.googlesource.com/components_ukm_filtered_mirror"
+ }
+ url {
+ type: GIT
+ value: "https://github.com/chromium/chromium"
+ }
+ # Closest commit hash to m70.
+ version: "73162fc55e079d35b6f8c02a6ceee6298f0b3b7e"
+ last_upgrade_date {
+ year: 2018
+ month: 8
+ day: 29
+ }
+}
diff --git a/components/ukm/OWNERS b/components/ukm/OWNERS
new file mode 100644
index 0000000..684cb16
--- /dev/null
+++ b/components/ukm/OWNERS
@@ -0,0 +1,3 @@
+file://base/metrics/OWNERS
+
+# COMPONENT: Internals>Metrics>UKM
diff --git a/components/ukm/content/BUILD.gn b/components/ukm/content/BUILD.gn
new file mode 100644
index 0000000..42ee889
--- /dev/null
+++ b/components/ukm/content/BUILD.gn
@@ -0,0 +1,42 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+static_library("content") {
+ sources = [
+ "app_source_url_recorder.cc",
+ "app_source_url_recorder.h",
+ "source_url_recorder.cc",
+ "source_url_recorder.h",
+ ]
+ deps = [
+ "//base",
+ "//components/crx_file",
+ "//content/public/browser",
+ "//services/metrics/public/cpp:metrics_cpp",
+ "//services/metrics/public/cpp:ukm_builders",
+ "//services/metrics/public/mojom",
+ "//url",
+ ]
+}
+
+source_set("unit_tests") {
+ testonly = true
+ sources = [
+ "app_source_url_recorder_test.cc",
+ "source_url_recorder_test.cc",
+ ]
+ deps = [
+ ":content",
+ "//base",
+ "//base/test:test_support",
+ "//components/metrics",
+ "//components/metrics:test_support",
+ "//components/ukm",
+ "//components/ukm:test_support",
+ "//content/public/browser",
+ "//content/test:test_support",
+ "//testing/gtest",
+ "//url",
+ ]
+}
diff --git a/components/ukm/content/DEPS b/components/ukm/content/DEPS
new file mode 100644
index 0000000..8ec5f07
--- /dev/null
+++ b/components/ukm/content/DEPS
@@ -0,0 +1,8 @@
+include_rules = [
+ "+components/crx_file/id_util.h",
+ "+content/public/browser",
+ "+content/public/common",
+ "+content/public/test",
+ "+content/shell",
+ "+third_party/blink/public/platform/ukm.mojom.h",
+]
diff --git a/components/ukm/content/app_source_url_recorder.cc b/components/ukm/content/app_source_url_recorder.cc
new file mode 100644
index 0000000..93f16c5
--- /dev/null
+++ b/components/ukm/content/app_source_url_recorder.cc
@@ -0,0 +1,51 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/content/app_source_url_recorder.h"
+
+#include "base/atomic_sequence_num.h"
+#include "components/crx_file/id_util.h"
+#include "services/metrics/public/cpp/delegating_ukm_recorder.h"
+#include "services/metrics/public/cpp/ukm_recorder.h"
+#include "services/metrics/public/cpp/ukm_source_id.h"
+#include "url/gurl.h"
+
+namespace ukm {
+
+SourceId AssignNewAppId() {
+ static base::AtomicSequenceNumber seq;
+ return ConvertToSourceId(seq.GetNext() + 1, SourceIdType::APP_ID);
+}
+
+SourceId AppSourceUrlRecorder::GetSourceIdForChromeApp(const std::string& id) {
+ GURL url("chrome-extension://" + id);
+ return GetSourceIdForUrl(url);
+}
+
+SourceId AppSourceUrlRecorder::GetSourceIdForArc(
+ const std::string& package_name) {
+ const std::string package_name_hash =
+ crx_file::id_util::GenerateId(package_name);
+ GURL url("app://play/" + package_name_hash);
+ return GetSourceIdForUrl(url);
+}
+
+SourceId AppSourceUrlRecorder::GetSourceIdForPWA(const GURL& url) {
+ return GetSourceIdForUrl(url);
+}
+
+SourceId AppSourceUrlRecorder::GetSourceIdForUrl(const GURL& url) {
+ ukm::DelegatingUkmRecorder* const recorder =
+ ukm::DelegatingUkmRecorder::Get();
+ if (!recorder)
+ return kInvalidSourceId;
+
+ const SourceId source_id = AssignNewAppId();
+ if (base::FeatureList::IsEnabled(kUkmAppLogging)) {
+ recorder->UpdateAppURL(source_id, url);
+ }
+ return source_id;
+}
+
+} // namespace ukm
diff --git a/components/ukm/content/app_source_url_recorder.h b/components/ukm/content/app_source_url_recorder.h
new file mode 100644
index 0000000..4b98434
--- /dev/null
+++ b/components/ukm/content/app_source_url_recorder.h
@@ -0,0 +1,40 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_CONTENT_APP_SOURCE_URL_RECORDER_H_
+#define COMPONENTS_UKM_CONTENT_APP_SOURCE_URL_RECORDER_H_
+
+#include "services/metrics/public/cpp/ukm_source_id.h"
+
+#include "base/feature_list.h"
+
+#include <string>
+
+class GURL;
+
+namespace ukm {
+
+const base::Feature kUkmAppLogging{"UkmAppLogging",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+
+class AppSourceUrlRecorder {
+ private:
+ friend class AppSourceUrlRecorderTest;
+
+ // Get a UKM SourceId for a Chrome app.
+ static SourceId GetSourceIdForChromeApp(const std::string& id);
+
+ // Get a UKM SourceId for an Arc app.
+ static SourceId GetSourceIdForArc(const std::string& package_name);
+
+ // Get a UKM SourceId for a PWA.
+ static SourceId GetSourceIdForPWA(const GURL& url);
+
+ // For internal use only.
+ static SourceId GetSourceIdForUrl(const GURL& url);
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_CONTENT_APP_SOURCE_URL_RECORDER_H_
diff --git a/components/ukm/content/app_source_url_recorder_test.cc b/components/ukm/content/app_source_url_recorder_test.cc
new file mode 100644
index 0000000..ae4807b
--- /dev/null
+++ b/components/ukm/content/app_source_url_recorder_test.cc
@@ -0,0 +1,66 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/content/app_source_url_recorder.h"
+
+#include "base/test/scoped_feature_list.h"
+#include "components/ukm/test_ukm_recorder.h"
+#include "content/public/test/test_renderer_host.h"
+#include "services/metrics/public/cpp/ukm_source.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "url/gurl.h"
+
+namespace ukm {
+
+class AppSourceUrlRecorderTest : public content::RenderViewHostTestHarness {
+ public:
+ void SetUp() override {
+ scoped_feature_list_.InitAndEnableFeature(kUkmAppLogging);
+ content::RenderViewHostTestHarness::SetUp();
+ }
+
+ protected:
+ SourceId GetSourceIdForArc(const std::string& package_name) {
+ return AppSourceUrlRecorder::GetSourceIdForArc(package_name);
+ }
+
+ SourceId GetSourceIdForPWA(const GURL& url) {
+ return AppSourceUrlRecorder::GetSourceIdForPWA(url);
+ }
+
+ base::test::ScopedFeatureList scoped_feature_list_;
+ TestAutoSetUkmRecorder test_ukm_recorder_;
+};
+
+TEST_F(AppSourceUrlRecorderTest, CheckArc) {
+ SourceId id = GetSourceIdForArc("com.google.play");
+
+ std::string com_google_play_hash("pjhgmeephkiehhlkfcoginnkbphkdang");
+ GURL expected_url("app://play/" + com_google_play_hash);
+
+ const auto& sources = test_ukm_recorder_.GetSources();
+ ASSERT_EQ(1ul, sources.size());
+
+ ASSERT_NE(kInvalidSourceId, id);
+ auto it = sources.find(id);
+ ASSERT_NE(sources.end(), it);
+ EXPECT_EQ(expected_url, it->second->url());
+ EXPECT_EQ(1u, it->second->urls().size());
+}
+
+TEST_F(AppSourceUrlRecorderTest, CheckPWA) {
+ GURL url("https://pwa_example_url.com");
+ SourceId id = GetSourceIdForPWA(url);
+
+ const auto& sources = test_ukm_recorder_.GetSources();
+ ASSERT_EQ(1ul, sources.size());
+
+ ASSERT_NE(kInvalidSourceId, id);
+ auto it = sources.find(id);
+ ASSERT_NE(sources.end(), it);
+ EXPECT_EQ(url, it->second->url());
+ EXPECT_EQ(1u, it->second->urls().size());
+}
+
+} // namespace ukm
diff --git a/components/ukm/content/source_url_recorder.cc b/components/ukm/content/source_url_recorder.cc
new file mode 100644
index 0000000..b2741f7
--- /dev/null
+++ b/components/ukm/content/source_url_recorder.cc
@@ -0,0 +1,293 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/content/source_url_recorder.h"
+
+#include <utility>
+
+#include "base/containers/flat_map.h"
+#include "base/macros.h"
+#include "content/public/browser/navigation_handle.h"
+#include "content/public/browser/web_contents.h"
+#include "content/public/browser/web_contents_binding_set.h"
+#include "content/public/browser/web_contents_observer.h"
+#include "content/public/browser/web_contents_user_data.h"
+#include "services/metrics/public/cpp/delegating_ukm_recorder.h"
+#include "services/metrics/public/cpp/ukm_builders.h"
+#include "services/metrics/public/cpp/ukm_source_id.h"
+#include "third_party/blink/public/platform/ukm.mojom.h"
+#include "url/gurl.h"
+
+namespace ukm {
+
+namespace internal {
+
+int64_t CreateUniqueTabId() {
+ static int64_t unique_id_counter = 0;
+ return ++unique_id_counter;
+}
+
+// SourceUrlRecorderWebContentsObserver is responsible for recording UKM source
+// URLs, for all (and only) main frame navigations in a given WebContents.
+// SourceUrlRecorderWebContentsObserver records both the final URL for a
+// navigation, and, if the navigation was redirected, the initial URL as well.
+class SourceUrlRecorderWebContentsObserver
+ : public blink::mojom::UkmSourceIdFrameHost,
+ public content::WebContentsObserver,
+ public content::WebContentsUserData<
+ SourceUrlRecorderWebContentsObserver> {
+ public:
+ // Creates a SourceUrlRecorderWebContentsObserver for the given
+ // WebContents. If a SourceUrlRecorderWebContentsObserver is already
+ // associated with the WebContents, this method is a no-op.
+ static void CreateForWebContents(content::WebContents* web_contents);
+
+ // content::WebContentsObserver:
+ void DidStartNavigation(
+ content::NavigationHandle* navigation_handle) override;
+ void DidFinishNavigation(
+ content::NavigationHandle* navigation_handle) override;
+ void DidOpenRequestedURL(content::WebContents* new_contents,
+ content::RenderFrameHost* source_render_frame_host,
+ const GURL& url,
+ const content::Referrer& referrer,
+ WindowOpenDisposition disposition,
+ ui::PageTransition transition,
+ bool started_from_context_menu,
+ bool renderer_initiated) override;
+
+ ukm::SourceId GetLastCommittedSourceId() const;
+
+ // blink::mojom::UkmSourceIdFrameHost
+ void SetDocumentSourceId(int64_t source_id) override;
+
+ private:
+ explicit SourceUrlRecorderWebContentsObserver(
+ content::WebContents* web_contents);
+ friend class content::WebContentsUserData<
+ SourceUrlRecorderWebContentsObserver>;
+
+ // Record any pending DocumentCreated events to UKM.
+ void MaybeFlushPendingEvents();
+
+ void MaybeRecordUrl(content::NavigationHandle* navigation_handle,
+ const GURL& initial_url);
+
+ // Receives document source IDs from the renderer.
+ content::WebContentsFrameBindingSet<blink::mojom::UkmSourceIdFrameHost>
+ bindings_;
+
+ // Map from navigation ID to the initial URL for that navigation.
+ base::flat_map<int64_t, GURL> pending_navigations_;
+
+ // Holds pending DocumentCreated events.
+ struct PendingEvent {
+ PendingEvent() = delete;
+ PendingEvent(int64_t source_id,
+ bool is_main_frame,
+ bool is_cross_origin_frame)
+ : source_id(source_id),
+ is_main_frame(is_main_frame),
+ is_cross_origin_frame(is_cross_origin_frame) {}
+
+ int64_t source_id;
+ bool is_main_frame;
+ bool is_cross_origin_frame;
+ };
+ std::vector<PendingEvent> pending_document_created_events_;
+
+ SourceId last_committed_source_id_;
+
+ // The source id before |last_committed_source_id_|.
+ SourceId previous_committed_source_id_;
+
+ // The source id of the last committed source in the tab that opened this tab.
+ // Will be set to kInvalidSourceId after the first navigation in this tab is
+ // finished.
+ SourceId opener_source_id_;
+
+ const int64_t tab_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(SourceUrlRecorderWebContentsObserver);
+};
+
+SourceUrlRecorderWebContentsObserver::SourceUrlRecorderWebContentsObserver(
+ content::WebContents* web_contents)
+ : content::WebContentsObserver(web_contents),
+ bindings_(web_contents, this),
+ last_committed_source_id_(ukm::kInvalidSourceId),
+ previous_committed_source_id_(ukm::kInvalidSourceId),
+ opener_source_id_(ukm::kInvalidSourceId),
+ tab_id_(CreateUniqueTabId()) {}
+
+void SourceUrlRecorderWebContentsObserver::DidStartNavigation(
+ content::NavigationHandle* navigation_handle) {
+ // UKM only records URLs for main frame (web page) navigations, so ignore
+ // non-main frame navs. Additionally, at least for the time being, we don't
+ // track metrics for same-document navigations (e.g. changes in URL fragment,
+ // or URL changes due to history.pushState) in UKM.
+ if (!navigation_handle->IsInMainFrame() ||
+ navigation_handle->IsSameDocument()) {
+ return;
+ }
+
+ // UKM doesn't want to record URLs for downloads. However, at the point a
+ // navigation is started, we don't yet know if the navigation will result in a
+ // download. Thus, we store the URL at the time a navigation is initiated,
+ // and only record it later, once we verify that the navigation didn't result
+ // in a download.
+ pending_navigations_.insert(std::make_pair(
+ navigation_handle->GetNavigationId(), navigation_handle->GetURL()));
+
+ // Clear any unassociated pending events.
+ pending_document_created_events_.clear();
+}
+
+void SourceUrlRecorderWebContentsObserver::DidFinishNavigation(
+ content::NavigationHandle* navigation_handle) {
+ auto it = pending_navigations_.find(navigation_handle->GetNavigationId());
+ if (it == pending_navigations_.end())
+ return;
+
+ DCHECK(navigation_handle->IsInMainFrame());
+ DCHECK(!navigation_handle->IsSameDocument());
+
+ if (navigation_handle->HasCommitted()) {
+ previous_committed_source_id_ = last_committed_source_id_;
+ last_committed_source_id_ = ukm::ConvertToSourceId(
+ navigation_handle->GetNavigationId(), ukm::SourceIdType::NAVIGATION_ID);
+ }
+
+ GURL initial_url = std::move(it->second);
+ pending_navigations_.erase(it);
+
+ // UKM doesn't want to record URLs for navigations that result in downloads.
+ if (navigation_handle->IsDownload())
+ return;
+
+ MaybeRecordUrl(navigation_handle, initial_url);
+
+ MaybeFlushPendingEvents();
+
+ // Reset the opener source id. Only the first source in a tab should have an
+ // opener.
+ opener_source_id_ = kInvalidSourceId;
+}
+
+void SourceUrlRecorderWebContentsObserver::DidOpenRequestedURL(
+ content::WebContents* new_contents,
+ content::RenderFrameHost* source_render_frame_host,
+ const GURL& url,
+ const content::Referrer& referrer,
+ WindowOpenDisposition disposition,
+ ui::PageTransition transition,
+ bool started_from_context_menu,
+ bool renderer_initiated) {
+ auto* new_recorder =
+ SourceUrlRecorderWebContentsObserver::FromWebContents(new_contents);
+ if (!new_recorder)
+ return;
+ new_recorder->opener_source_id_ = GetLastCommittedSourceId();
+}
+
+ukm::SourceId SourceUrlRecorderWebContentsObserver::GetLastCommittedSourceId()
+ const {
+ return last_committed_source_id_;
+}
+
+void SourceUrlRecorderWebContentsObserver::SetDocumentSourceId(
+ int64_t source_id) {
+ content::RenderFrameHost* main_frame = web_contents()->GetMainFrame();
+ content::RenderFrameHost* current_frame = bindings_.GetCurrentTargetFrame();
+ bool is_main_frame = main_frame == current_frame;
+ bool is_cross_origin_frame =
+ is_main_frame ? false
+ : !main_frame->GetLastCommittedOrigin().IsSameOriginWith(
+ current_frame->GetLastCommittedOrigin());
+
+ pending_document_created_events_.emplace_back(
+ source_id, !bindings_.GetCurrentTargetFrame()->GetParent(),
+ is_cross_origin_frame);
+ MaybeFlushPendingEvents();
+}
+
+void SourceUrlRecorderWebContentsObserver::MaybeFlushPendingEvents() {
+ if (!last_committed_source_id_)
+ return;
+
+ ukm::DelegatingUkmRecorder* ukm_recorder = ukm::DelegatingUkmRecorder::Get();
+ if (!ukm_recorder)
+ return;
+
+ while (!pending_document_created_events_.empty()) {
+ auto record = pending_document_created_events_.back();
+
+ ukm::builders::DocumentCreated(record.source_id)
+ .SetNavigationSourceId(last_committed_source_id_)
+ .SetIsMainFrame(record.is_main_frame)
+ .SetIsCrossOriginFrame(record.is_cross_origin_frame)
+ .Record(ukm_recorder);
+
+ pending_document_created_events_.pop_back();
+ }
+}
+
+void SourceUrlRecorderWebContentsObserver::MaybeRecordUrl(
+ content::NavigationHandle* navigation_handle,
+ const GURL& initial_url) {
+ DCHECK(navigation_handle->IsInMainFrame());
+ DCHECK(!navigation_handle->IsSameDocument());
+
+ ukm::DelegatingUkmRecorder* ukm_recorder = ukm::DelegatingUkmRecorder::Get();
+ if (!ukm_recorder)
+ return;
+
+ UkmSource::NavigationData navigation_data;
+ const GURL& final_url = navigation_handle->GetURL();
+ // TODO(crbug.com/869123): This check isn't quite correct, as self redirecting
+ // is possible. This may also be changed to include the entire redirect chain.
+ if (final_url != initial_url)
+ navigation_data.urls = {initial_url};
+ navigation_data.urls.push_back(final_url);
+
+ // Careful note: the current navigation may have failed.
+ navigation_data.previous_source_id = navigation_handle->HasCommitted()
+ ? previous_committed_source_id_
+ : last_committed_source_id_;
+ navigation_data.opener_source_id = opener_source_id_;
+ navigation_data.tab_id = tab_id_;
+
+ const ukm::SourceId source_id = ukm::ConvertToSourceId(
+ navigation_handle->GetNavigationId(), ukm::SourceIdType::NAVIGATION_ID);
+ ukm_recorder->RecordNavigation(source_id, navigation_data);
+}
+
+// static
+void SourceUrlRecorderWebContentsObserver::CreateForWebContents(
+ content::WebContents* web_contents) {
+ if (!SourceUrlRecorderWebContentsObserver::FromWebContents(web_contents)) {
+ web_contents->SetUserData(
+ SourceUrlRecorderWebContentsObserver::UserDataKey(),
+ base::WrapUnique(
+ new SourceUrlRecorderWebContentsObserver(web_contents)));
+ }
+}
+
+} // namespace internal
+
+void InitializeSourceUrlRecorderForWebContents(
+ content::WebContents* web_contents) {
+ internal::SourceUrlRecorderWebContentsObserver::CreateForWebContents(
+ web_contents);
+}
+
+SourceId GetSourceIdForWebContentsDocument(
+ const content::WebContents* web_contents) {
+ const internal::SourceUrlRecorderWebContentsObserver* obs =
+ internal::SourceUrlRecorderWebContentsObserver::FromWebContents(
+ web_contents);
+ return obs ? obs->GetLastCommittedSourceId() : kInvalidSourceId;
+}
+
+} // namespace ukm
diff --git a/components/ukm/content/source_url_recorder.h b/components/ukm/content/source_url_recorder.h
new file mode 100644
index 0000000..64f704d
--- /dev/null
+++ b/components/ukm/content/source_url_recorder.h
@@ -0,0 +1,27 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_CONTENT_SOURCE_URL_RECORDER_H_
+#define COMPONENTS_UKM_CONTENT_SOURCE_URL_RECORDER_H_
+
+#include "services/metrics/public/cpp/ukm_source_id.h"
+
+namespace content {
+class WebContents;
+} // namespace content
+
+namespace ukm {
+
+// Initializes recording of UKM source URLs for the given WebContents.
+void InitializeSourceUrlRecorderForWebContents(
+ content::WebContents* web_contents);
+
+// Get a UKM SourceId for the currently committed document of web contents.
+// Returns kInvalidSourceId if no commit has been observed.
+SourceId GetSourceIdForWebContentsDocument(
+ const content::WebContents* web_contents);
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_CONTENT_SOURCE_URL_RECORDER_H_
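
Typical embedder usage, as a minimal sketch (the surrounding function names are hypothetical; only the two ukm:: calls come from this header):

  #include "components/ukm/content/source_url_recorder.h"

  // Hypothetical tab-creation hook: must run once per WebContents, before its
  // first navigation, so that committed main-frame URLs get UKM sources.
  void OnTabCreated(content::WebContents* web_contents) {
    ukm::InitializeSourceUrlRecorderForWebContents(web_contents);
  }

  // Later, feature code can key UKM events to the current document.
  ukm::SourceId SourceIdForCurrentDocument(content::WebContents* web_contents) {
    const ukm::SourceId id =
        ukm::GetSourceIdForWebContentsDocument(web_contents);
    // kInvalidSourceId means no main-frame commit has been observed yet.
    return id;
  }
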
diff --git a/components/ukm/content/source_url_recorder_browsertest.cc b/components/ukm/content/source_url_recorder_browsertest.cc
new file mode 100644
index 0000000..bb52494
--- /dev/null
+++ b/components/ukm/content/source_url_recorder_browsertest.cc
@@ -0,0 +1,161 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/test/scoped_feature_list.h"
+#include "components/ukm/content/source_url_recorder.h"
+#include "components/ukm/test_ukm_recorder.h"
+#include "content/public/browser/web_contents.h"
+#include "content/public/test/content_browser_test.h"
+#include "content/public/test/content_browser_test_utils.h"
+#include "content/public/test/navigation_handle_observer.h"
+#include "content/shell/browser/shell.h"
+#include "content/shell/browser/shell_browser_context.h"
+#include "content/shell/browser/shell_download_manager_delegate.h"
+#include "services/metrics/public/cpp/ukm_builders.h"
+#include "services/metrics/public/cpp/ukm_source.h"
+
+class SourceUrlRecorderWebContentsObserverBrowserTest
+ : public content::ContentBrowserTest {
+ protected:
+ SourceUrlRecorderWebContentsObserverBrowserTest() {
+ scoped_feature_list_.InitAndEnableFeature(ukm::kUkmFeature);
+ }
+
+ ~SourceUrlRecorderWebContentsObserverBrowserTest() override {}
+
+ void SetUpOnMainThread() override {
+ content::ContentBrowserTest::SetUpOnMainThread();
+
+ ASSERT_TRUE(embedded_test_server()->Start());
+
+ test_ukm_recorder_ = std::make_unique<ukm::TestAutoSetUkmRecorder>();
+ ukm::InitializeSourceUrlRecorderForWebContents(shell()->web_contents());
+ }
+
+ const ukm::UkmSource* GetSourceForNavigationId(int64_t navigation_id) {
+ CHECK_GT(navigation_id, 0);
+ const ukm::SourceId source_id =
+ ukm::ConvertToSourceId(navigation_id, ukm::SourceIdType::NAVIGATION_ID);
+ return test_ukm_recorder_->GetSourceForSourceId(source_id);
+ }
+
+ GURL GetAssociatedURLForWebContentsDocument() {
+ const ukm::UkmSource* src = test_ukm_recorder_->GetSourceForSourceId(
+ ukm::GetSourceIdForWebContentsDocument(shell()->web_contents()));
+ return src ? src->url() : GURL();
+ }
+
+ const ukm::TestAutoSetUkmRecorder& test_ukm_recorder() const {
+ return *test_ukm_recorder_;
+ }
+
+ private:
+ base::test::ScopedFeatureList scoped_feature_list_;
+ std::unique_ptr<ukm::TestAutoSetUkmRecorder> test_ukm_recorder_;
+
+ DISALLOW_COPY_AND_ASSIGN(SourceUrlRecorderWebContentsObserverBrowserTest);
+};
+
+class SourceUrlRecorderWebContentsObserverDownloadBrowserTest
+ : public SourceUrlRecorderWebContentsObserverBrowserTest {
+ protected:
+ void SetUpOnMainThread() override {
+ SourceUrlRecorderWebContentsObserverBrowserTest::SetUpOnMainThread();
+
+ // Set up a test download directory, in order to prevent prompting for
+ // handling downloads.
+ ASSERT_TRUE(downloads_directory_.CreateUniqueTempDir());
+ content::ShellDownloadManagerDelegate* delegate =
+ static_cast<content::ShellDownloadManagerDelegate*>(
+ shell()
+ ->web_contents()
+ ->GetBrowserContext()
+ ->GetDownloadManagerDelegate());
+ delegate->SetDownloadBehaviorForTesting(downloads_directory_.GetPath());
+ }
+
+ private:
+ base::ScopedTempDir downloads_directory_;
+};
+
+IN_PROC_BROWSER_TEST_F(SourceUrlRecorderWebContentsObserverBrowserTest, Basic) {
+ using Entry = ukm::builders::DocumentCreated;
+
+ GURL url = embedded_test_server()->GetURL("/title1.html");
+ content::NavigationHandleObserver observer(shell()->web_contents(), url);
+ content::NavigateToURL(shell(), url);
+ EXPECT_TRUE(observer.has_committed());
+ const ukm::UkmSource* source =
+ GetSourceForNavigationId(observer.navigation_id());
+ EXPECT_NE(nullptr, source);
+ EXPECT_EQ(url, source->url());
+ EXPECT_EQ(1u, source->urls().size());
+
+ EXPECT_EQ(url, GetAssociatedURLForWebContentsDocument());
+
+ // Check we have created a DocumentCreated event.
+ auto ukm_entries = test_ukm_recorder().GetEntriesByName(Entry::kEntryName);
+ EXPECT_EQ(1u, ukm_entries.size());
+ EXPECT_EQ(source->id(), *test_ukm_recorder().GetEntryMetric(
+ ukm_entries[0], Entry::kNavigationSourceIdName));
+ EXPECT_EQ(1, *test_ukm_recorder().GetEntryMetric(ukm_entries[0],
+ Entry::kIsMainFrameName));
+ EXPECT_NE(source->id(), ukm_entries[0]->source_id);
+}
+
+IN_PROC_BROWSER_TEST_F(SourceUrlRecorderWebContentsObserverBrowserTest,
+ IgnoreUrlInSubframe) {
+ using Entry = ukm::builders::DocumentCreated;
+
+ GURL main_url = embedded_test_server()->GetURL("/page_with_iframe.html");
+ GURL subframe_url = embedded_test_server()->GetURL("/title1.html");
+
+ content::NavigationHandleObserver main_observer(shell()->web_contents(),
+ main_url);
+ content::NavigationHandleObserver subframe_observer(shell()->web_contents(),
+ subframe_url);
+ content::NavigateToURL(shell(), main_url);
+ EXPECT_TRUE(main_observer.has_committed());
+ EXPECT_TRUE(main_observer.is_main_frame());
+ EXPECT_TRUE(subframe_observer.has_committed());
+ EXPECT_FALSE(subframe_observer.is_main_frame());
+
+ const ukm::UkmSource* source =
+ GetSourceForNavigationId(main_observer.navigation_id());
+ EXPECT_NE(nullptr, source);
+ EXPECT_EQ(main_url, source->url());
+ EXPECT_EQ(nullptr,
+ GetSourceForNavigationId(subframe_observer.navigation_id()));
+
+ EXPECT_EQ(main_url, GetAssociatedURLForWebContentsDocument());
+
+ // Check we have created a DocumentCreated event for both frames.
+ auto ukm_entries = test_ukm_recorder().GetEntriesByName(Entry::kEntryName);
+ EXPECT_EQ(2u, ukm_entries.size());
+ EXPECT_EQ(source->id(), *test_ukm_recorder().GetEntryMetric(
+ ukm_entries[0], Entry::kNavigationSourceIdName));
+ EXPECT_EQ(0, *test_ukm_recorder().GetEntryMetric(ukm_entries[0],
+ Entry::kIsMainFrameName));
+ EXPECT_EQ(source->id(), *test_ukm_recorder().GetEntryMetric(
+ ukm_entries[1], Entry::kNavigationSourceIdName));
+ EXPECT_EQ(1, *test_ukm_recorder().GetEntryMetric(ukm_entries[1],
+ Entry::kIsMainFrameName));
+ EXPECT_NE(ukm_entries[0]->source_id, ukm_entries[1]->source_id);
+ EXPECT_NE(source->id(), ukm_entries[0]->source_id);
+ EXPECT_NE(source->id(), ukm_entries[1]->source_id);
+}
+
+IN_PROC_BROWSER_TEST_F(SourceUrlRecorderWebContentsObserverDownloadBrowserTest,
+ IgnoreDownload) {
+ GURL url(embedded_test_server()->GetURL("/download-test1.lib"));
+ content::NavigationHandleObserver observer(shell()->web_contents(), url);
+ content::NavigateToURL(shell(), url);
+ EXPECT_FALSE(observer.has_committed());
+ EXPECT_TRUE(observer.is_download());
+ EXPECT_EQ(nullptr, GetSourceForNavigationId(observer.navigation_id()));
+ EXPECT_EQ(GURL(), GetAssociatedURLForWebContentsDocument());
+}
diff --git a/components/ukm/content/source_url_recorder_test.cc b/components/ukm/content/source_url_recorder_test.cc
new file mode 100644
index 0000000..aaed558
--- /dev/null
+++ b/components/ukm/content/source_url_recorder_test.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/content/source_url_recorder.h"
+#include "components/ukm/test_ukm_recorder.h"
+#include "content/public/browser/web_contents.h"
+#include "content/public/test/navigation_simulator.h"
+#include "content/public/test/test_renderer_host.h"
+#include "services/metrics/public/cpp/ukm_recorder.h"
+#include "services/metrics/public/cpp/ukm_source.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "url/gurl.h"
+
+using content::NavigationSimulator;
+
+class SourceUrlRecorderWebContentsObserverTest
+ : public content::RenderViewHostTestHarness {
+ public:
+ void SetUp() override {
+ content::RenderViewHostTestHarness::SetUp();
+ ukm::InitializeSourceUrlRecorderForWebContents(web_contents());
+ }
+
+ GURL GetAssociatedURLForWebContentsDocument() {
+ const ukm::UkmSource* src = test_ukm_recorder_.GetSourceForSourceId(
+ ukm::GetSourceIdForWebContentsDocument(web_contents()));
+ return src ? src->url() : GURL();
+ }
+
+ protected:
+ ukm::TestAutoSetUkmRecorder test_ukm_recorder_;
+};
+
+TEST_F(SourceUrlRecorderWebContentsObserverTest, Basic) {
+ GURL url("https://www.example.com/");
+ NavigationSimulator::NavigateAndCommitFromBrowser(web_contents(), url);
+
+ const auto& sources = test_ukm_recorder_.GetSources();
+ EXPECT_EQ(1ul, sources.size());
+ for (const auto& kv : sources) {
+ EXPECT_EQ(url, kv.second->url());
+ EXPECT_EQ(1u, kv.second->urls().size());
+ }
+}
+
+TEST_F(SourceUrlRecorderWebContentsObserverTest, InitialUrl) {
+ GURL initial_url("https://www.a.com/");
+ GURL final_url("https://www.b.com/");
+ auto simulator =
+ NavigationSimulator::CreateRendererInitiated(initial_url, main_rfh());
+ simulator->Start();
+ simulator->Redirect(final_url);
+ simulator->Commit();
+ const auto& sources = test_ukm_recorder_.GetSources();
+ EXPECT_EQ(1ul, sources.size());
+ for (const auto& kv : sources) {
+ EXPECT_EQ(final_url, kv.second->url());
+ EXPECT_EQ(initial_url, kv.second->urls().front());
+ }
+
+ EXPECT_EQ(final_url, GetAssociatedURLForWebContentsDocument());
+}
+
+TEST_F(SourceUrlRecorderWebContentsObserverTest, IgnoreUrlInSubframe) {
+ GURL main_frame_url("https://www.example.com/");
+ GURL sub_frame_url("https://www.example.com/iframe.html");
+ NavigationSimulator::NavigateAndCommitFromBrowser(web_contents(),
+ main_frame_url);
+ NavigationSimulator::NavigateAndCommitFromDocument(
+ sub_frame_url,
+ content::RenderFrameHostTester::For(main_rfh())->AppendChild("subframe"));
+
+ const auto& sources = test_ukm_recorder_.GetSources();
+ EXPECT_EQ(1ul, sources.size());
+ for (const auto& kv : sources) {
+ EXPECT_EQ(main_frame_url, kv.second->url());
+ EXPECT_EQ(1u, kv.second->urls().size());
+ }
+
+ EXPECT_EQ(main_frame_url, GetAssociatedURLForWebContentsDocument());
+}
+
+TEST_F(SourceUrlRecorderWebContentsObserverTest, IgnoreSameDocumentNavigation) {
+ GURL url("https://www.example.com/");
+ GURL same_document_url("https://www.example.com/#samedocument");
+ NavigationSimulator::NavigateAndCommitFromBrowser(web_contents(), url);
+ NavigationSimulator::CreateRendererInitiated(same_document_url, main_rfh())
+ ->CommitSameDocument();
+
+ EXPECT_EQ(same_document_url, web_contents()->GetLastCommittedURL());
+
+ const auto& sources = test_ukm_recorder_.GetSources();
+ EXPECT_EQ(1ul, sources.size());
+ for (const auto& kv : sources) {
+ EXPECT_EQ(url, kv.second->url());
+ EXPECT_EQ(1u, kv.second->urls().size());
+ }
+
+ EXPECT_EQ(url, GetAssociatedURLForWebContentsDocument());
+}
diff --git a/components/ukm/debug/BUILD.gn b/components/ukm/debug/BUILD.gn
new file mode 100644
index 0000000..e4f6781
--- /dev/null
+++ b/components/ukm/debug/BUILD.gn
@@ -0,0 +1,30 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//third_party/closure_compiler/compile_js.gni")
+
+source_set("util") {
+ sources = [
+ "ukm_debug_data_extractor.cc",
+ "ukm_debug_data_extractor.h",
+ ]
+ deps = [
+ "//base",
+ "//components/ukm",
+ "//services/metrics/public/cpp:ukm_builders",
+ ]
+}
+
+js_type_check("closure_compile") {
+ deps = [
+ ":ukm_internals",
+ ]
+}
+
+js_library("ukm_internals") {
+ deps = [
+ "//ui/webui/resources/js:cr",
+ "//ui/webui/resources/js:util",
+ ]
+}
diff --git a/components/ukm/debug/ukm_debug_data_extractor.cc b/components/ukm/debug/ukm_debug_data_extractor.cc
new file mode 100644
index 0000000..4877914
--- /dev/null
+++ b/components/ukm/debug/ukm_debug_data_extractor.cc
@@ -0,0 +1,118 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/debug/ukm_debug_data_extractor.h"
+
+#include <inttypes.h>
+
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "base/strings/stringprintf.h"
+#include "services/metrics/public/cpp/ukm_decode.h"
+#include "services/metrics/public/cpp/ukm_source.h"
+#include "url/gurl.h"
+
+namespace ukm {
+namespace debug {
+
+namespace {
+
+struct SourceData {
+ UkmSource* source;
+ std::vector<mojom::UkmEntry*> entries;
+};
+
+std::string GetName(const ukm::builders::EntryDecoder& decoder, uint64_t hash) {
+ const auto it = decoder.metric_map.find(hash);
+ if (it == decoder.metric_map.end())
+ return base::StringPrintf("Unknown %" PRIu64, hash);
+ return it->second;
+}
+
+base::Value ConvertEntryToValue(const ukm::builders::DecodeMap& decode_map,
+ const mojom::UkmEntry& entry) {
+ base::DictionaryValue entry_value;
+
+ const auto it = decode_map.find(entry.event_hash);
+ if (it == decode_map.end()) {
+ entry_value.SetKey("name",
+ base::Value(static_cast<double>(entry.event_hash)));
+ } else {
+ entry_value.SetKey("name", base::Value(it->second.name));
+
+ base::ListValue metrics_list;
+ auto* metrics_list_storage = &metrics_list.GetList();
+ for (const auto& metric : entry.metrics) {
+ base::DictionaryValue metric_value;
+ metric_value.SetKey("name",
+ base::Value(GetName(it->second, metric.first)));
+ metric_value.SetKey("value",
+ base::Value(static_cast<double>(metric.second)));
+ metrics_list_storage->push_back(std::move(metric_value));
+ }
+ entry_value.SetKey("metrics", std::move(metrics_list));
+ }
+ return std::move(entry_value);
+}
+
+} // namespace
+
+UkmDebugDataExtractor::UkmDebugDataExtractor() = default;
+
+UkmDebugDataExtractor::~UkmDebugDataExtractor() = default;
+
+// static
+base::Value UkmDebugDataExtractor::GetStructuredData(
+ const UkmService* ukm_service) {
+ if (!ukm_service)
+ return {};
+
+ base::DictionaryValue ukm_data;
+ ukm_data.SetKey("state", base::Value(ukm_service->recording_enabled_));
+ ukm_data.SetKey("client_id",
+ base::Value(static_cast<double>(ukm_service->client_id_)));
+ ukm_data.SetKey("session_id",
+ base::Value(static_cast<double>(ukm_service->session_id_)));
+
+ std::map<SourceId, SourceData> source_data;
+ for (const auto& kv : ukm_service->recordings_.sources) {
+ source_data[kv.first].source = kv.second.get();
+ }
+
+ for (const auto& v : ukm_service->recordings_.entries) {
+ source_data[v->source_id].entries.push_back(v.get());
+ }
+
+ base::ListValue sources_list;
+ auto* source_list_storage = &sources_list.GetList();
+ for (const auto& kv : source_data) {
+ const auto* src = kv.second.source;
+
+ base::DictionaryValue source_value;
+ if (src) {
+ source_value.SetKey("id", base::Value(static_cast<double>(src->id())));
+ source_value.SetKey("url", base::Value(src->url().spec()));
+ } else {
+ source_value.SetKey("id", base::Value(static_cast<double>(kv.first)));
+ }
+
+ base::ListValue entries_list;
+ auto* entries_list_storage = &entries_list.GetList();
+ for (auto* entry : kv.second.entries) {
+ entries_list_storage->push_back(
+ ConvertEntryToValue(ukm_service->decode_map_, *entry));
+ }
+
+ source_value.SetKey("entries", std::move(entries_list));
+
+ source_list_storage->push_back(std::move(source_value));
+ }
+ ukm_data.SetKey("sources", std::move(sources_list));
+ return std::move(ukm_data);
+}
+
+} // namespace debug
+} // namespace ukm
diff --git a/components/ukm/debug/ukm_debug_data_extractor.h b/components/ukm/debug/ukm_debug_data_extractor.h
new file mode 100644
index 0000000..dc2810d
--- /dev/null
+++ b/components/ukm/debug/ukm_debug_data_extractor.h
@@ -0,0 +1,36 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_DEBUG_UKM_DEBUG_DATA_EXTRACTOR_H_
+#define COMPONENTS_UKM_DEBUG_UKM_DEBUG_DATA_EXTRACTOR_H_
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/values.h"
+#include "components/ukm/ukm_service.h"
+
+namespace ukm {
+
+class UkmService;
+
+namespace debug {
+
+// Extracts structured UKM data for display on the UKM debugging page.
+class UkmDebugDataExtractor {
+ public:
+ UkmDebugDataExtractor();
+ ~UkmDebugDataExtractor();
+
+ // Returns UKM data structured in a DictionaryValue.
+ static base::Value GetStructuredData(const UkmService* ukm_service);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(UkmDebugDataExtractor);
+};
+
+} // namespace debug
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_DEBUG_UKM_DEBUG_DATA_EXTRACTOR_H_
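
The returned base::Value can be handed to a WebUI message handler or serialized for inspection; a minimal sketch of the latter (the wrapper function is hypothetical):

  #include <string>
  #include "base/json/json_writer.h"
  #include "components/ukm/debug/ukm_debug_data_extractor.h"

  std::string DumpUkmStateAsJson(const ukm::UkmService* ukm_service) {
    base::Value data =
        ukm::debug::UkmDebugDataExtractor::GetStructuredData(ukm_service);
    std::string json;
    base::JSONWriter::WriteWithOptions(
        data, base::JSONWriter::OPTIONS_PRETTY_PRINT, &json);
    return json;
  }
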
diff --git a/components/ukm/debug/ukm_internals.html b/components/ukm/debug/ukm_internals.html
new file mode 100644
index 0000000..34c5e43
--- /dev/null
+++ b/components/ukm/debug/ukm_internals.html
@@ -0,0 +1,20 @@
+<!-- Copyright 2018 The Chromium Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style license that can be
+ found in the LICENSE file.-->
+<!doctype html>
+<html lang="en">
+<meta charset="utf-8">
+<script src="chrome://resources/js/cr.js"></script>
+<script src="chrome://resources/js/promise_resolver.js"></script>
+<script src="chrome://resources/js/util.js"></script>
+<title>UKM Debug page</title>
+<h1>UKM Debug page</h1>
+<div>
+ <p>Is Enabled:<span id="state"></span></p>
+ <p>Client Id:<span id="clientid"></span></p>
+ <p>Session Id:<span id="sessionid"></span></p>
+ <h2>Sources</h2>
+ <div id="sources"></div>
+</div>
+<script src="ukm_internals.js"></script>
+</html>
diff --git a/components/ukm/debug/ukm_internals.js b/components/ukm/debug/ukm_internals.js
new file mode 100644
index 0000000..85c1c7a
--- /dev/null
+++ b/components/ukm/debug/ukm_internals.js
@@ -0,0 +1,78 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @typedef {{
+ * name: string,
+ * value: string
+ * }}
+ */
+var Metric;
+
+/**
+ * @typedef {{
+ * name: string,
+ * metrics: !Array<!Metric>
+ * }}
+ */
+var UkmEntry;
+
+/**
+ * @typedef {{
+ * url: string,
+ * id: string,
+ * entries: !Array<UkmEntry>,
+ * }}
+ */
+var UkmDataSource;
+
+/**
+ * The Ukm data sent from the browser.
+ * @typedef {{
+ * state: boolean,
+ * client_id: string,
+ * session_id: string,
+ * sources: !Array<!UkmDataSource>,
+ * }}
+ */
+var UkmData;
+
+/**
+ * Fetches data from the Ukm service and updates the DOM to display it as a
+ * list.
+ */
+function updateUkmData() {
+ cr.sendWithPromise('requestUkmData').then((/** @type {UkmData} */ data) => {
+ $('state').innerText = data.state ? 'True' : 'False';
+ $('clientid').innerText = data.client_id;
+ $('sessionid').innerText = data.session_id;
+
+ let sourceDiv = $('sources');
+ for (const source of data.sources) {
+ const sourceElement = document.createElement('h3');
+ if (source.url !== undefined)
+ sourceElement.innerText = `Id: ${source.id} Url: ${source.url}`;
+ else
+ sourceElement.innerText = `Id: ${source.id}`;
+ sourceDiv.appendChild(sourceElement);
+
+ for (const entry of source.entries) {
+ const entryElement = document.createElement('h4');
+ entryElement.innerText = `Entry: ${entry.name}`;
+ sourceDiv.appendChild(entryElement);
+
+ if (entry.metrics === undefined)
+ continue;
+ for (const metric of entry.metrics) {
+ const metricElement = document.createElement('h5');
+ metricElement.innerText =
+ `Metric: ${metric.name} Value: ${metric.value}`;
+ sourceDiv.appendChild(metricElement);
+ }
+ }
+ }
+ });
+}
+
+updateUkmData();
diff --git a/components/ukm/observers/DEPS b/components/ukm/observers/DEPS
new file mode 100644
index 0000000..a8ed862
--- /dev/null
+++ b/components/ukm/observers/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+components/history/core/browser",
+ "+components/sync",
+]
diff --git a/components/ukm/observers/history_delete_observer.cc b/components/ukm/observers/history_delete_observer.cc
new file mode 100644
index 0000000..da42600
--- /dev/null
+++ b/components/ukm/observers/history_delete_observer.cc
@@ -0,0 +1,34 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/observers/history_delete_observer.h"
+
+#include "components/history/core/browser/history_service.h"
+
+namespace ukm {
+
+HistoryDeleteObserver::HistoryDeleteObserver() : history_observer_(this) {}
+
+HistoryDeleteObserver::~HistoryDeleteObserver() {}
+
+void HistoryDeleteObserver::ObserveServiceForDeletions(
+ history::HistoryService* history_service) {
+ if (history_service) {
+ history_observer_.Add(history_service);
+ }
+}
+
+void HistoryDeleteObserver::OnURLsDeleted(
+ history::HistoryService* history_service,
+ const history::DeletionInfo& deletion_info) {
+ if (!deletion_info.is_from_expiration())
+ OnHistoryDeleted();
+}
+
+void HistoryDeleteObserver::HistoryServiceBeingDeleted(
+ history::HistoryService* history_service) {
+ history_observer_.Remove(history_service);
+}
+
+} // namespace ukm
diff --git a/components/ukm/observers/history_delete_observer.h b/components/ukm/observers/history_delete_observer.h
new file mode 100644
index 0000000..ea48bfc
--- /dev/null
+++ b/components/ukm/observers/history_delete_observer.h
@@ -0,0 +1,44 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_OBSERVERS_HISTORY_DELETE_OBSERVER_H_
+#define COMPONENTS_UKM_OBSERVERS_HISTORY_DELETE_OBSERVER_H_
+
+#include <set>
+
+#include "base/scoped_observer.h"
+#include "components/history/core/browser/history_service_observer.h"
+
+namespace ukm {
+
+// Observes multiple HistoryService objects for any events that delete history.
+// Handles cleanup and removing observers as objects are destroyed.
+class HistoryDeleteObserver : public history::HistoryServiceObserver {
+ public:
+ HistoryDeleteObserver();
+ ~HistoryDeleteObserver() override;
+
+ // Starts observing a service for history deletions.
+ void ObserveServiceForDeletions(history::HistoryService* history_service);
+
+ // history::HistoryServiceObserver
+ void OnURLsDeleted(history::HistoryService* history_service,
+ const history::DeletionInfo& deletion_info) override;
+ void HistoryServiceBeingDeleted(
+ history::HistoryService* history_service) override;
+
+ protected:
+ virtual void OnHistoryDeleted() = 0;
+
+ private:
+ // Tracks observed history services, for cleanup.
+ ScopedObserver<history::HistoryService, history::HistoryServiceObserver>
+ history_observer_;
+
+ DISALLOW_COPY_AND_ASSIGN(HistoryDeleteObserver);
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_OBSERVERS_HISTORY_DELETE_OBSERVER_H_
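
A minimal sketch of a consumer, assuming the embedder owns some local store to purge (the class and helper names are hypothetical):

  #include "components/ukm/observers/history_delete_observer.h"

  // Purge locally buffered data whenever the user deletes browsing history.
  class LocalMetricsStore : public ukm::HistoryDeleteObserver {
   protected:
    void OnHistoryDeleted() override {
      PurgeLocalData();  // Hypothetical helper owned by the embedder.
    }

   private:
    void PurgeLocalData() {}
  };
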
diff --git a/components/ukm/observers/sync_disable_observer.cc b/components/ukm/observers/sync_disable_observer.cc
new file mode 100644
index 0000000..820dbb3
--- /dev/null
+++ b/components/ukm/observers/sync_disable_observer.cc
@@ -0,0 +1,227 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/observers/sync_disable_observer.h"
+
+#include "base/feature_list.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/stl_util.h"
+#include "components/sync/driver/sync_token_status.h"
+#include "components/sync/engine/connection_status.h"
+#include "components/unified_consent/url_keyed_data_collection_consent_helper.h"
+
+using unified_consent::UrlKeyedDataCollectionConsentHelper;
+
+namespace ukm {
+
+const base::Feature kUkmCheckAuthErrorFeature{"UkmCheckAuthError",
+ base::FEATURE_ENABLED_BY_DEFAULT};
+
+namespace {
+
+enum DisableInfo {
+ DISABLED_BY_NONE,
+ DISABLED_BY_HISTORY,
+ DISABLED_BY_INITIALIZED,
+ DISABLED_BY_HISTORY_INITIALIZED,
+ DISABLED_BY_CONNECTED,
+ DISABLED_BY_HISTORY_CONNECTED,
+ DISABLED_BY_INITIALIZED_CONNECTED,
+ DISABLED_BY_HISTORY_INITIALIZED_CONNECTED,
+ DISABLED_BY_PASSPHRASE,
+ DISABLED_BY_HISTORY_PASSPHRASE,
+ DISABLED_BY_INITIALIZED_PASSPHRASE,
+ DISABLED_BY_HISTORY_INITIALIZED_PASSPHRASE,
+ DISABLED_BY_CONNECTED_PASSPHRASE,
+ DISABLED_BY_HISTORY_CONNECTED_PASSPHRASE,
+ DISABLED_BY_INITIALIZED_CONNECTED_PASSPHRASE,
+ DISABLED_BY_HISTORY_INITIALIZED_CONNECTED_PASSPHRASE,
+ DISABLED_BY_ANONYMIZED_DATA_COLLECTION,
+ MAX_DISABLE_INFO
+};
+
+void RecordDisableInfo(DisableInfo info) {
+ UMA_HISTOGRAM_ENUMERATION("UKM.SyncDisable.Info", info, MAX_DISABLE_INFO);
+}
+
+} // namespace
+
+SyncDisableObserver::SyncDisableObserver() : sync_observer_(this) {}
+
+SyncDisableObserver::~SyncDisableObserver() {
+ for (const auto& entry : consent_helpers_) {
+ entry.second->RemoveObserver(this);
+ }
+}
+
+bool SyncDisableObserver::SyncState::AllowsUkm() const {
+ if (anonymized_data_collection_state == DataCollectionState::kIgnored)
+ return history_enabled && initialized && connected && !passphrase_protected;
+ else
+ return anonymized_data_collection_state == DataCollectionState::kEnabled;
+}
+
+bool SyncDisableObserver::SyncState::AllowsUkmWithExtension() const {
+ return AllowsUkm() && extensions_enabled && initialized && connected &&
+ !passphrase_protected;
+}
+
+// static
+SyncDisableObserver::SyncState SyncDisableObserver::GetSyncState(
+ syncer::SyncService* sync_service,
+ UrlKeyedDataCollectionConsentHelper* consent_helper) {
+ syncer::SyncTokenStatus status = sync_service->GetSyncTokenStatus();
+ SyncState state;
+ state.history_enabled = sync_service->GetPreferredDataTypes().Has(
+ syncer::HISTORY_DELETE_DIRECTIVES);
+ state.extensions_enabled =
+ sync_service->GetPreferredDataTypes().Has(syncer::EXTENSIONS);
+ state.initialized = sync_service->IsEngineInitialized();
+ state.connected = !base::FeatureList::IsEnabled(kUkmCheckAuthErrorFeature) ||
+ status.connection_status == syncer::CONNECTION_OK;
+ state.passphrase_protected =
+ state.initialized && sync_service->IsUsingSecondaryPassphrase();
+ if (consent_helper) {
+ state.anonymized_data_collection_state =
+ consent_helper->IsEnabled() ? DataCollectionState::kEnabled
+ : DataCollectionState::kDisabled;
+ }
+ return state;
+}
+
+void SyncDisableObserver::ObserveServiceForSyncDisables(
+ syncer::SyncService* sync_service,
+ PrefService* prefs,
+ bool is_unified_consent_enabled) {
+ std::unique_ptr<UrlKeyedDataCollectionConsentHelper> consent_helper;
+ if (is_unified_consent_enabled) {
+ consent_helper = UrlKeyedDataCollectionConsentHelper::
+ NewAnonymizedDataCollectionConsentHelper(true, prefs, sync_service);
+ }
+
+ SyncState state = GetSyncState(sync_service, consent_helper.get());
+ previous_states_[sync_service] = state;
+
+ if (consent_helper) {
+ consent_helper->AddObserver(this);
+ consent_helpers_[sync_service] = std::move(consent_helper);
+ }
+ sync_observer_.Add(sync_service);
+ UpdateAllProfileEnabled(false);
+}
+
+void SyncDisableObserver::UpdateAllProfileEnabled(bool must_purge) {
+ bool all_sync_states_allow_ukm = CheckSyncStateOnAllProfiles();
+ bool all_sync_states_allow_extension_ukm =
+ all_sync_states_allow_ukm && CheckSyncStateForExtensionsOnAllProfiles();
+ // Any change in sync settings needs to call OnSyncPrefsChanged so that the
+ // new settings take effect.
+ if (must_purge || (all_sync_states_allow_ukm != all_sync_states_allow_ukm_) ||
+ (all_sync_states_allow_extension_ukm !=
+ all_sync_states_allow_extension_ukm_)) {
+ all_sync_states_allow_ukm_ = all_sync_states_allow_ukm;
+ all_sync_states_allow_extension_ukm_ = all_sync_states_allow_extension_ukm;
+ OnSyncPrefsChanged(must_purge);
+ }
+}
+
+bool SyncDisableObserver::CheckSyncStateOnAllProfiles() {
+ if (previous_states_.empty())
+ return false;
+ for (const auto& kv : previous_states_) {
+ const SyncDisableObserver::SyncState& state = kv.second;
+ if (!state.AllowsUkm()) {
+ int disabled_by = 0;
+ if (state.anonymized_data_collection_state ==
+ DataCollectionState::kIgnored) {
+ if (!state.history_enabled)
+ disabled_by |= 1 << 0;
+ if (!state.initialized)
+ disabled_by |= 1 << 1;
+ if (!state.connected)
+ disabled_by |= 1 << 2;
+ if (state.passphrase_protected)
+ disabled_by |= 1 << 3;
+ } else {
+ DCHECK_EQ(DataCollectionState::kDisabled,
+ state.anonymized_data_collection_state);
+ disabled_by |= 1 << 4;
+ }
+ RecordDisableInfo(DisableInfo(disabled_by));
+ return false;
+ }
+ }
+ RecordDisableInfo(DISABLED_BY_NONE);
+ return true;
+}
+
+bool SyncDisableObserver::CheckSyncStateForExtensionsOnAllProfiles() {
+ if (previous_states_.empty())
+ return false;
+ for (const auto& kv : previous_states_) {
+ const SyncDisableObserver::SyncState& state = kv.second;
+ if (!state.extensions_enabled)
+ return false;
+ }
+ return true;
+}
+
+void SyncDisableObserver::OnStateChanged(syncer::SyncService* sync) {
+ UrlKeyedDataCollectionConsentHelper* consent_helper = nullptr;
+ auto found = consent_helpers_.find(sync);
+ if (found != consent_helpers_.end())
+ consent_helper = found->second.get();
+ UpdateSyncState(sync, consent_helper);
+}
+
+void SyncDisableObserver::OnUrlKeyedDataCollectionConsentStateChanged(
+ unified_consent::UrlKeyedDataCollectionConsentHelper* consent_helper) {
+ DCHECK(consent_helper);
+ syncer::SyncService* sync_service = nullptr;
+ for (const auto& entry : consent_helpers_) {
+ if (consent_helper == entry.second.get()) {
+ sync_service = entry.first;
+ break;
+ }
+ }
+ DCHECK(sync_service);
+ UpdateSyncState(sync_service, consent_helper);
+}
+
+void SyncDisableObserver::UpdateSyncState(
+ syncer::SyncService* sync,
+ UrlKeyedDataCollectionConsentHelper* consent_helper) {
+ DCHECK(base::ContainsKey(previous_states_, sync));
+ const SyncDisableObserver::SyncState& previous_state = previous_states_[sync];
+ DCHECK(previous_state.anonymized_data_collection_state ==
+ DataCollectionState::kIgnored ||
+ consent_helper);
+ SyncDisableObserver::SyncState state = GetSyncState(sync, consent_helper);
+ // Trigger a purge if sync state no longer allows UKM.
+ bool must_purge = previous_state.AllowsUkm() && !state.AllowsUkm();
+ previous_states_[sync] = state;
+ UpdateAllProfileEnabled(must_purge);
+}
+
+void SyncDisableObserver::OnSyncShutdown(syncer::SyncService* sync) {
+ DCHECK(base::ContainsKey(previous_states_, sync));
+ auto found = consent_helpers_.find(sync);
+ if (found != consent_helpers_.end()) {
+ found->second->RemoveObserver(this);
+ consent_helpers_.erase(found);
+ }
+ sync_observer_.Remove(sync);
+ previous_states_.erase(sync);
+ UpdateAllProfileEnabled(false);
+}
+
+bool SyncDisableObserver::SyncStateAllowsUkm() {
+ return all_sync_states_allow_ukm_;
+}
+
+bool SyncDisableObserver::SyncStateAllowsExtensionUkm() {
+ return all_sync_states_allow_extension_ukm_;
+}
+
+} // namespace ukm
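
A worked example of the bitmask logged to UKM.SyncDisable.Info above: bit 0 means history sync is off, bit 1 means the engine is not initialized, bit 2 means not connected, bit 3 means a secondary passphrase is set, and bit 4 means anonymized data collection is disabled. A profile whose engine is not initialized and which uses a passphrase therefore reports (1 << 1) | (1 << 3) = 10, which indexes DISABLED_BY_INITIALIZED_PASSPHRASE in the DisableInfo enum.
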
diff --git a/components/ukm/observers/sync_disable_observer.h b/components/ukm/observers/sync_disable_observer.h
new file mode 100644
index 0000000..e051443
--- /dev/null
+++ b/components/ukm/observers/sync_disable_observer.h
@@ -0,0 +1,161 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_OBSERVERS_SYNC_DISABLE_OBSERVER_H_
+#define COMPONENTS_UKM_OBSERVERS_SYNC_DISABLE_OBSERVER_H_
+
+#include <map>
+
+#include "base/scoped_observer.h"
+#include "components/sync/driver/sync_service.h"
+#include "components/sync/driver/sync_service_observer.h"
+#include "components/unified_consent/url_keyed_data_collection_consent_helper.h"
+
+class PrefService;
+
+namespace ukm {
+
+// Observer that monitors whether UKM is allowed for all profiles.
+//
+// For one profile, UKM is allowed under the following conditions:
+// * If unified consent is disabled, then UKM is allowed for the profile iff
+// sync history is active;
+// * If unified consent is enabled, then UKM is allowed for the profile iff
+// URL-keyed anonymized data collection is enabled.
+class SyncDisableObserver
+ : public syncer::SyncServiceObserver,
+ public unified_consent::UrlKeyedDataCollectionConsentHelper::Observer {
+ public:
+ SyncDisableObserver();
+ ~SyncDisableObserver() override;
+
+ // Starts observing a service for sync disables.
+ void ObserveServiceForSyncDisables(syncer::SyncService* sync_service,
+ PrefService* pref_service,
+ bool is_unified_consent_enabled);
+
+ // Returns true iff all sync states allow UKM to be enabled. This means that
+ // for all profiles:
+ // * If unified consent is disabled, then sync is initialized, connected, has
+ // the HISTORY_DELETE_DIRECTIVES data type enabled, and does not have a
+ // secondary passphrase enabled.
+ // * If unified consent is enabled, then URL-keyed anonymized data collection
+ // is enabled for that profile.
+ virtual bool SyncStateAllowsUkm();
+
+ // Returns true iff sync is in a state that allows UKM to capture extensions.
+ // This means that all profiles have EXTENSIONS data type enabled for syncing.
+ virtual bool SyncStateAllowsExtensionUkm();
+
+ protected:
+ // Called after state changes and some profile has sync disabled.
+ // If |must_purge| is true, sync was disabled for some profile, and
+ // local data should be purged.
+ virtual void OnSyncPrefsChanged(bool must_purge) = 0;
+
+ private:
+ // syncer::SyncServiceObserver:
+ void OnStateChanged(syncer::SyncService* sync) override;
+ void OnSyncShutdown(syncer::SyncService* sync) override;
+
+ // unified_consent::UrlKeyedDataCollectionConsentHelper::Observer:
+ void OnUrlKeyedDataCollectionConsentStateChanged(
+ unified_consent::UrlKeyedDataCollectionConsentHelper* consent_helper)
+ override;
+
+ // Recomputes all_sync_states_allow_ukm_ and
+ // all_sync_states_allow_extension_ukm_ from previous_states_.
+ void UpdateAllProfileEnabled(bool must_purge);
+
+ // Returns true iff all sync states in previous_states_ allow UKM.
+ // If there are no profiles being observed, this returns false.
+ bool CheckSyncStateOnAllProfiles();
+
+ // Returns true iff all sync states in previous_states_ allow extension UKM.
+ // If there are no profiles being observed, this returns false.
+ bool CheckSyncStateForExtensionsOnAllProfiles();
+
+ // Tracks observed sync services, for cleanup.
+ ScopedObserver<syncer::SyncService, syncer::SyncServiceObserver>
+ sync_observer_;
+
+ enum class DataCollectionState {
+ // Matches the case when the unified consent feature is disabled.
+ kIgnored,
+ // Unified consent feature is enabled and the user has disabled URL-keyed
+ // anonymized data collection.
+ kDisabled,
+ // Unified consent feature is enabled and the user has enabled URL-keyed
+ // anonymized data collection.
+ kEnabled
+ };
+
+ // State data about sync services that we need to remember.
+ struct SyncState {
+ // Returns true if this sync state allows UKM:
+ // * If unified consent is disabled, then sync is initialized, connected,
+ // has history data type enabled, and does not have a secondary passphrase
+ // enabled.
+ // * If unified consent is enabled, then URL-keyed anonymized data
+ // collection is enabled.
+ bool AllowsUkm() const;
+ // Returns true if |AllowsUkm| returns true and sync extensions are enabled.
+ bool AllowsUkmWithExtension() const;
+
+ // If the user has history sync enabled.
+ bool history_enabled = false;
+ // If the user has extension sync enabled.
+ bool extensions_enabled = false;
+ // Whether the sync service has been initialized.
+ bool initialized = false;
+ // Whether the sync service is active and operational.
+ bool connected = false;
+ // Whether user data is hidden by a secondary passphrase.
+ // This is not valid if the state is not initialized.
+ bool passphrase_protected = false;
+
+ // Whether anonymized data collection is enabled.
+ // Note: This is not managed by sync service. It was added in this enum
+ // for convenience.
+ DataCollectionState anonymized_data_collection_state =
+ DataCollectionState::kIgnored;
+ };
+
+ // Updates the sync state for |sync| service. Updates all profiles if needed.
+ void UpdateSyncState(
+ syncer::SyncService* sync,
+ unified_consent::UrlKeyedDataCollectionConsentHelper* consent_helper);
+
+ // Gets the current state of a SyncService.
+ // A non-null |consent_helper| implies that Unified Consent is enabled.
+ static SyncState GetSyncState(
+ syncer::SyncService* sync,
+ unified_consent::UrlKeyedDataCollectionConsentHelper* consent_helper);
+
+ // The state of the sync services being observed.
+ std::map<syncer::SyncService*, SyncState> previous_states_;
+
+ // The list of URL-keyed anonymized data collection consent helpers.
+ //
+ // Note: UrlKeyedDataCollectionConsentHelpers do not rely on sync when the
+ // unified consent feature is enabled, but there must be exactly one per
+ // Chromium profile. As there is a single sync service per profile, it is safe
+ // to key them by sync service instead of introducing an additional map.
+ std::map<
+ syncer::SyncService*,
+ std::unique_ptr<unified_consent::UrlKeyedDataCollectionConsentHelper>>
+ consent_helpers_;
+
+ // Tracks if UKM is allowed on all profiles after the last state change.
+ bool all_sync_states_allow_ukm_ = false;
+
+ // Tracks if extension sync was enabled on all profiles after the last state
+ // change.
+ bool all_sync_states_allow_extension_ukm_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncDisableObserver);
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_OBSERVERS_SYNC_DISABLE_OBSERVER_H_
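
OnSyncPrefsChanged() is the only hook subclasses implement; |must_purge| signals that some profile just revoked consent and locally buffered data should be dropped. Below is a minimal sketch of a hypothetical subclass, assuming only the header above (ExampleUkmConsumer and DiscardLocalData are illustrative names, not part of the component):

```cpp
#include "components/ukm/observers/sync_disable_observer.h"

namespace ukm {

// Hypothetical consumer, sketched to show how |must_purge| and
// SyncStateAllowsUkm() are typically combined.
class ExampleUkmConsumer : public SyncDisableObserver {
 public:
  ExampleUkmConsumer() = default;
  ~ExampleUkmConsumer() override = default;

  bool recording_allowed() const { return recording_allowed_; }

 private:
  // SyncDisableObserver:
  void OnSyncPrefsChanged(bool must_purge) override {
    if (must_purge) {
      // Some profile no longer allows UKM; drop what was collected so far.
      DiscardLocalData();
    }
    recording_allowed_ = SyncStateAllowsUkm();
  }

  void DiscardLocalData() { /* purge in-memory recordings */ }

  bool recording_allowed_ = false;
};

}  // namespace ukm
```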
diff --git a/components/ukm/observers/sync_disable_observer_unittest.cc b/components/ukm/observers/sync_disable_observer_unittest.cc
new file mode 100644
index 0000000..5208ebb
--- /dev/null
+++ b/components/ukm/observers/sync_disable_observer_unittest.cc
@@ -0,0 +1,339 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/observers/sync_disable_observer.h"
+
+#include "base/observer_list.h"
+#include "components/sync/driver/fake_sync_service.h"
+#include "components/sync/driver/sync_token_status.h"
+#include "components/sync/engine/connection_status.h"
+#include "components/sync_preferences/testing_pref_service_syncable.h"
+#include "components/unified_consent/pref_names.h"
+#include "components/unified_consent/unified_consent_service.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace ukm {
+
+namespace {
+
+class MockSyncService : public syncer::FakeSyncService {
+ public:
+ MockSyncService() {}
+ ~MockSyncService() override { Shutdown(); }
+
+ void SetStatus(bool has_passphrase, bool history_enabled) {
+ initialized_ = true;
+ has_passphrase_ = has_passphrase;
+ preferred_data_types_ =
+ history_enabled
+ ? syncer::ModelTypeSet(syncer::HISTORY_DELETE_DIRECTIVES)
+ : syncer::ModelTypeSet();
+ NotifyObserversOfStateChanged();
+ }
+
+ void SetConnectionStatus(syncer::ConnectionStatus status) {
+ connection_status_ = status;
+ NotifyObserversOfStateChanged();
+ }
+
+ void Shutdown() override {
+ for (auto& observer : observers_) {
+ observer.OnSyncShutdown(this);
+ }
+ }
+
+ void NotifyObserversOfStateChanged() {
+ for (auto& observer : observers_) {
+ observer.OnStateChanged(this);
+ }
+ }
+
+ private:
+ // syncer::FakeSyncService:
+ void AddObserver(syncer::SyncServiceObserver* observer) override {
+ observers_.AddObserver(observer);
+ }
+ void RemoveObserver(syncer::SyncServiceObserver* observer) override {
+ observers_.RemoveObserver(observer);
+ }
+ TransportState GetTransportState() const override {
+ return initialized_ ? TransportState::ACTIVE : TransportState::INITIALIZING;
+ }
+ bool IsUsingSecondaryPassphrase() const override { return has_passphrase_; }
+ syncer::ModelTypeSet GetPreferredDataTypes() const override {
+ return preferred_data_types_;
+ }
+ syncer::SyncTokenStatus GetSyncTokenStatus() const override {
+ syncer::SyncTokenStatus status;
+ status.connection_status = connection_status_;
+ return status;
+ }
+
+ bool initialized_ = false;
+ bool has_passphrase_ = false;
+ syncer::ConnectionStatus connection_status_ = syncer::CONNECTION_OK;
+ syncer::ModelTypeSet preferred_data_types_;
+
+ // The list of observers of the SyncService state.
+ base::ObserverList<syncer::SyncServiceObserver>::Unchecked observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockSyncService);
+};
+
+class TestSyncDisableObserver : public SyncDisableObserver {
+ public:
+ TestSyncDisableObserver() : purged_(false), notified_(false) {}
+ ~TestSyncDisableObserver() override {}
+
+ bool ResetPurged() {
+ bool was_purged = purged_;
+ purged_ = false;
+ return was_purged;
+ }
+
+ bool ResetNotified() {
+ bool notified = notified_;
+ notified_ = false;
+ return notified;
+ }
+
+ private:
+ // SyncDisableObserver:
+ void OnSyncPrefsChanged(bool must_purge) override {
+ notified_ = true;
+ purged_ = purged_ || must_purge;
+ }
+ bool purged_;
+ bool notified_;
+ DISALLOW_COPY_AND_ASSIGN(TestSyncDisableObserver);
+};
+
+class SyncDisableObserverTest : public testing::Test {
+ public:
+ SyncDisableObserverTest() {}
+ void RegisterUrlKeyedAnonymizedDataCollectionPref(
+ sync_preferences::TestingPrefServiceSyncable& prefs) {
+ unified_consent::UnifiedConsentService::RegisterPrefs(prefs.registry());
+ }
+
+ void SetUrlKeyedAnonymizedDataCollectionEnabled(PrefService* prefs,
+ bool enabled) {
+ prefs->SetBoolean(
+ unified_consent::prefs::kUrlKeyedAnonymizedDataCollectionEnabled,
+ enabled);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SyncDisableObserverTest);
+};
+
+} // namespace
+
+TEST_F(SyncDisableObserverTest, NoProfiles) {
+ TestSyncDisableObserver observer;
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, OneEnabled_UnifiedConsentDisabled) {
+ TestSyncDisableObserver observer;
+ MockSyncService sync;
+ sync.SetStatus(false, true);
+ observer.ObserveServiceForSyncDisables(&sync, nullptr, false);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, OneEnabled_UnifiedConsentEnabled) {
+ sync_preferences::TestingPrefServiceSyncable prefs;
+ RegisterUrlKeyedAnonymizedDataCollectionPref(prefs);
+ SetUrlKeyedAnonymizedDataCollectionEnabled(&prefs, true);
+ MockSyncService sync;
+ for (bool has_passphrase : {true, false}) {
+ for (bool history_enabled : {true, false}) {
+ TestSyncDisableObserver observer;
+ sync.SetStatus(has_passphrase, history_enabled);
+ observer.ObserveServiceForSyncDisables(&sync, &prefs, true);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+ }
+ }
+}
+
+TEST_F(SyncDisableObserverTest, Passphrased_UnifiedConsentDisabled) {
+ MockSyncService sync;
+ sync.SetStatus(true, true);
+ TestSyncDisableObserver observer;
+ observer.ObserveServiceForSyncDisables(&sync, nullptr, false);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, HistoryDisabled_UnifiedConsentDisabled) {
+ TestSyncDisableObserver observer;
+ MockSyncService sync;
+ sync.SetStatus(false, false);
+ observer.ObserveServiceForSyncDisables(&sync, nullptr, false);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, AuthError_UnifiedConsentDisabled) {
+ TestSyncDisableObserver observer;
+ MockSyncService sync;
+ sync.SetStatus(false, true);
+ observer.ObserveServiceForSyncDisables(&sync, nullptr, false);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ sync.SetConnectionStatus(syncer::CONNECTION_AUTH_ERROR);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ sync.SetConnectionStatus(syncer::CONNECTION_OK);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+}
+
+TEST_F(SyncDisableObserverTest, MixedProfiles1_UnifiedConsentDisabled) {
+ TestSyncDisableObserver observer;
+ MockSyncService sync1;
+ sync1.SetStatus(false, false);
+ observer.ObserveServiceForSyncDisables(&sync1, nullptr, false);
+ MockSyncService sync2;
+ sync2.SetStatus(false, true);
+ observer.ObserveServiceForSyncDisables(&sync2, nullptr, false);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, MixedProfiles2_UnifiedConsentDisabled) {
+ TestSyncDisableObserver observer;
+ MockSyncService sync1;
+ sync1.SetStatus(false, true);
+ observer.ObserveServiceForSyncDisables(&sync1, nullptr, false);
+ EXPECT_TRUE(observer.ResetNotified());
+
+ MockSyncService sync2;
+ sync2.SetStatus(false, false);
+ observer.ObserveServiceForSyncDisables(&sync2, nullptr, false);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+ sync2.Shutdown();
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, TwoEnabled_UnifiedConsentDisabled) {
+ TestSyncDisableObserver observer;
+ MockSyncService sync1;
+ sync1.SetStatus(false, true);
+ observer.ObserveServiceForSyncDisables(&sync1, nullptr, false);
+ EXPECT_TRUE(observer.ResetNotified());
+ MockSyncService sync2;
+ sync2.SetStatus(false, true);
+ observer.ObserveServiceForSyncDisables(&sync2, nullptr, false);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, TwoEnabled_UnifiedConsentEnabled) {
+ sync_preferences::TestingPrefServiceSyncable prefs2;
+ RegisterUrlKeyedAnonymizedDataCollectionPref(prefs2);
+ TestSyncDisableObserver observer;
+
+ // First profile has sync enabled.
+ MockSyncService sync1;
+ sync1.SetStatus(false, true);
+ observer.ObserveServiceForSyncDisables(&sync1, nullptr, false);
+ EXPECT_TRUE(observer.ResetNotified());
+
+ // Second profile has URL-keyed anonymized data collection enabled.
+ MockSyncService sync2;
+ SetUrlKeyedAnonymizedDataCollectionEnabled(&prefs2, true);
+ observer.ObserveServiceForSyncDisables(&sync2, &prefs2, true);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, OneAddRemove_UnifiedConsentDisabled) {
+ TestSyncDisableObserver observer;
+ MockSyncService sync;
+ observer.ObserveServiceForSyncDisables(&sync, nullptr, false);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+ sync.SetStatus(false, true);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+ sync.Shutdown();
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, OneAddRemove_UnifiedConsentEnabled) {
+ sync_preferences::TestingPrefServiceSyncable prefs;
+ RegisterUrlKeyedAnonymizedDataCollectionPref(prefs);
+ TestSyncDisableObserver observer;
+ MockSyncService sync;
+ observer.ObserveServiceForSyncDisables(&sync, &prefs, true);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+ SetUrlKeyedAnonymizedDataCollectionEnabled(&prefs, true);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+ sync.Shutdown();
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, PurgeOnDisable_UnifiedConsentDisabled) {
+ TestSyncDisableObserver observer;
+ MockSyncService sync;
+ sync.SetStatus(false, true);
+ observer.ObserveServiceForSyncDisables(&sync, nullptr, false);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+ sync.SetStatus(false, false);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_TRUE(observer.ResetPurged());
+ sync.Shutdown();
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+TEST_F(SyncDisableObserverTest, PurgeOnDisable_UnifiedConsentEnabled) {
+ sync_preferences::TestingPrefServiceSyncable prefs;
+ RegisterUrlKeyedAnonymizedDataCollectionPref(prefs);
+ TestSyncDisableObserver observer;
+ MockSyncService sync;
+ SetUrlKeyedAnonymizedDataCollectionEnabled(&prefs, true);
+ observer.ObserveServiceForSyncDisables(&sync, &prefs, true);
+ EXPECT_TRUE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+ SetUrlKeyedAnonymizedDataCollectionEnabled(&prefs, false);
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_TRUE(observer.ResetNotified());
+ EXPECT_TRUE(observer.ResetPurged());
+ sync.Shutdown();
+ EXPECT_FALSE(observer.SyncStateAllowsUkm());
+ EXPECT_FALSE(observer.ResetNotified());
+ EXPECT_FALSE(observer.ResetPurged());
+}
+
+} // namespace ukm
diff --git a/components/ukm/persisted_logs_metrics_impl.cc b/components/ukm/persisted_logs_metrics_impl.cc
new file mode 100644
index 0000000..fa5fa97
--- /dev/null
+++ b/components/ukm/persisted_logs_metrics_impl.cc
@@ -0,0 +1,36 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/persisted_logs_metrics_impl.h"
+
+#include "base/metrics/histogram_macros.h"
+
+namespace ukm {
+
+PersistedLogsMetricsImpl::PersistedLogsMetricsImpl() = default;
+
+PersistedLogsMetricsImpl::~PersistedLogsMetricsImpl() = default;
+
+void PersistedLogsMetricsImpl::RecordLogReadStatus(
+ metrics::PersistedLogsMetrics::LogReadStatus status) {
+ UMA_HISTOGRAM_ENUMERATION("UKM.PersistentLogRecall.Status", status,
+ metrics::PersistedLogsMetrics::END_RECALL_STATUS);
+}
+
+void PersistedLogsMetricsImpl::RecordCompressionRatio(size_t compressed_size,
+ size_t original_size) {
+ UMA_HISTOGRAM_PERCENTAGE(
+ "UKM.ProtoCompressionRatio",
+ static_cast<int>(100 * compressed_size / original_size));
+}
+
+void PersistedLogsMetricsImpl::RecordDroppedLogSize(size_t size) {
+ UMA_HISTOGRAM_COUNTS_1M("UKM.UnsentLogs.DroppedSize", static_cast<int>(size));
+}
+
+void PersistedLogsMetricsImpl::RecordDroppedLogsNum(int dropped_logs_num) {
+ UMA_HISTOGRAM_COUNTS_10000("UKM.UnsentLogs.NumDropped", dropped_logs_num);
+}
+
+} // namespace ukm
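
RecordCompressionRatio() emits the integer percentage 100 * compressed_size / original_size as the histogram sample. A standalone arithmetic check of that expression, independent of the histogram macros:

```cpp
#include <cstddef>
#include <iostream>

int main() {
  // 40 KiB compressed out of 100 KiB original.
  size_t compressed_size = 40 * 1024;
  size_t original_size = 100 * 1024;

  // Same expression as RecordCompressionRatio(): multiply first so the
  // integer division keeps the percentage.
  int sample = static_cast<int>(100 * compressed_size / original_size);
  std::cout << sample << "\n";  // Prints 40.
  return 0;
}
```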
diff --git a/components/ukm/persisted_logs_metrics_impl.h b/components/ukm/persisted_logs_metrics_impl.h
new file mode 100644
index 0000000..a1aefe6
--- /dev/null
+++ b/components/ukm/persisted_logs_metrics_impl.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_PERSISTED_LOGS_METRICS_IMPL_H_
+#define COMPONENTS_UKM_PERSISTED_LOGS_METRICS_IMPL_H_
+
+#include "base/macros.h"
+#include "components/metrics/persisted_logs_metrics.h"
+
+namespace ukm {
+
+// Implementation for recording metrics from PersistedLogs.
+class PersistedLogsMetricsImpl : public metrics::PersistedLogsMetrics {
+ public:
+ PersistedLogsMetricsImpl();
+ ~PersistedLogsMetricsImpl() override;
+
+ // metrics::PersistedLogsMetrics:
+ void RecordLogReadStatus(
+ metrics::PersistedLogsMetrics::LogReadStatus status) override;
+ void RecordCompressionRatio(size_t compressed_size,
+ size_t original_size) override;
+ void RecordDroppedLogSize(size_t size) override;
+ void RecordDroppedLogsNum(int dropped_logs_num) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PersistedLogsMetricsImpl);
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_PERSISTED_LOGS_METRICS_IMPL_H_
diff --git a/components/ukm/test_ukm_recorder.cc b/components/ukm/test_ukm_recorder.cc
new file mode 100644
index 0000000..445348f
--- /dev/null
+++ b/components/ukm/test_ukm_recorder.cc
@@ -0,0 +1,143 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/test_ukm_recorder.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "base/logging.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/task/post_task.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "services/metrics/public/cpp/delegating_ukm_recorder.h"
+#include "services/metrics/public/cpp/ukm_source.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace ukm {
+
+namespace {
+
+// Merge the data from |in| to |out|.
+void MergeEntry(const mojom::UkmEntry* in, mojom::UkmEntry* out) {
+ if (out->event_hash) {
+ EXPECT_EQ(out->source_id, in->source_id);
+ EXPECT_EQ(out->event_hash, in->event_hash);
+ } else {
+ out->event_hash = in->event_hash;
+ out->source_id = in->source_id;
+ }
+ for (const auto& metric : in->metrics) {
+ out->metrics.emplace(metric);
+ }
+}
+
+} // namespace
+
+TestUkmRecorder::TestUkmRecorder() {
+ EnableRecording(/*extensions=*/true);
+ StoreWhitelistedEntries();
+ DisableSamplingForTesting();
+}
+
+TestUkmRecorder::~TestUkmRecorder() {
+}
+
+bool TestUkmRecorder::ShouldRestrictToWhitelistedSourceIds() const {
+ // In tests, we want to record all source ids (not just those that are
+ // whitelisted).
+ return false;
+}
+
+bool TestUkmRecorder::ShouldRestrictToWhitelistedEntries() const {
+ // In tests, we want to record all entries (not just those that are
+ // whitelisted).
+ return false;
+}
+
+const UkmSource* TestUkmRecorder::GetSourceForSourceId(
+ SourceId source_id) const {
+ const UkmSource* source = nullptr;
+ for (const auto& kv : sources()) {
+ if (kv.second->id() == source_id) {
+ DCHECK_EQ(nullptr, source);
+ source = kv.second.get();
+ }
+ }
+ return source;
+}
+
+std::vector<const mojom::UkmEntry*> TestUkmRecorder::GetEntriesByName(
+ base::StringPiece entry_name) const {
+ uint64_t hash = base::HashMetricName(entry_name);
+ std::vector<const mojom::UkmEntry*> result;
+ for (const auto& it : entries()) {
+ if (it->event_hash == hash)
+ result.push_back(it.get());
+ }
+ return result;
+}
+
+std::map<ukm::SourceId, mojom::UkmEntryPtr>
+TestUkmRecorder::GetMergedEntriesByName(base::StringPiece entry_name) const {
+ uint64_t hash = base::HashMetricName(entry_name);
+ std::map<ukm::SourceId, mojom::UkmEntryPtr> result;
+ for (const auto& it : entries()) {
+ if (it->event_hash != hash)
+ continue;
+ mojom::UkmEntryPtr& entry_ptr = result[it->source_id];
+ if (!entry_ptr)
+ entry_ptr = mojom::UkmEntry::New();
+ MergeEntry(it.get(), entry_ptr.get());
+ }
+ return result;
+}
+
+void TestUkmRecorder::ExpectEntrySourceHasUrl(const mojom::UkmEntry* entry,
+ const GURL& url) const {
+ const UkmSource* src = GetSourceForSourceId(entry->source_id);
+ if (src == nullptr) {
+ FAIL() << "Entry source id has no associated Source.";
+ return;
+ }
+ EXPECT_EQ(src->url(), url);
+}
+
+// static
+bool TestUkmRecorder::EntryHasMetric(const mojom::UkmEntry* entry,
+ base::StringPiece metric_name) {
+ return GetEntryMetric(entry, metric_name) != nullptr;
+}
+
+// static
+const int64_t* TestUkmRecorder::GetEntryMetric(const mojom::UkmEntry* entry,
+ base::StringPiece metric_name) {
+ uint64_t hash = base::HashMetricName(metric_name);
+ const auto it = entry->metrics.find(hash);
+ if (it != entry->metrics.end())
+ return &it->second;
+ return nullptr;
+}
+
+// static
+void TestUkmRecorder::ExpectEntryMetric(const mojom::UkmEntry* entry,
+ base::StringPiece metric_name,
+ int64_t expected_value) {
+ const int64_t* metric = GetEntryMetric(entry, metric_name);
+ if (metric == nullptr) {
+ FAIL() << "Failed to find metric for event: " << metric_name;
+ return;
+ }
+ EXPECT_EQ(expected_value, *metric) << " for metric:" << metric_name;
+}
+
+TestAutoSetUkmRecorder::TestAutoSetUkmRecorder() : self_ptr_factory_(this) {
+ DelegatingUkmRecorder::Get()->AddDelegate(self_ptr_factory_.GetWeakPtr());
+}
+
+TestAutoSetUkmRecorder::~TestAutoSetUkmRecorder() {
+ DelegatingUkmRecorder::Get()->RemoveDelegate(this);
+}
+
+} // namespace ukm
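
MergeEntry() combines metrics with emplace(), so when two entries for the same source and event carry the same metric hash, the value seen first is kept and later values are ignored. A standalone sketch of that merge semantics using std::map (the hashes and values are made up):

```cpp
#include <cstdint>
#include <iostream>
#include <map>

int main() {
  std::map<uint64_t, int64_t> merged;
  std::map<uint64_t, int64_t> first = {{0x1111, 10}};
  std::map<uint64_t, int64_t> second = {{0x1111, 99}, {0x2222, 7}};

  // emplace() does nothing for a key that is already present, so the value
  // from |first| survives the merge.
  for (const auto& metric : first)
    merged.emplace(metric);
  for (const auto& metric : second)
    merged.emplace(metric);

  std::cout << merged[0x1111] << " " << merged[0x2222] << "\n";  // Prints 10 7.
  return 0;
}
```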
diff --git a/components/ukm/test_ukm_recorder.h b/components/ukm/test_ukm_recorder.h
new file mode 100644
index 0000000..6950c3c
--- /dev/null
+++ b/components/ukm/test_ukm_recorder.h
@@ -0,0 +1,94 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_TEST_UKM_RECORDER_H_
+#define COMPONENTS_UKM_TEST_UKM_RECORDER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "components/ukm/ukm_recorder_impl.h"
+#include "services/metrics/public/cpp/ukm_recorder.h"
+#include "services/metrics/public/mojom/ukm_interface.mojom.h"
+#include "url/gurl.h"
+
+namespace ukm {
+
+// Wraps an UkmRecorder with additional accessors used for testing.
+class TestUkmRecorder : public UkmRecorderImpl {
+ public:
+ TestUkmRecorder();
+ ~TestUkmRecorder() override;
+
+ bool ShouldRestrictToWhitelistedSourceIds() const override;
+ bool ShouldRestrictToWhitelistedEntries() const override;
+
+ size_t sources_count() const { return sources().size(); }
+
+ size_t entries_count() const { return entries().size(); }
+
+ using UkmRecorderImpl::UpdateSourceURL;
+ using UkmRecorderImpl::RecordOtherURL;
+
+ // Gets all recorded UkmSource data.
+ const std::map<ukm::SourceId, std::unique_ptr<UkmSource>>& GetSources()
+ const {
+ return sources();
+ }
+
+ // Gets UkmSource data for a single SourceId.
+ const UkmSource* GetSourceForSourceId(ukm::SourceId source_id) const;
+
+ // Gets all of the entries recorded for entry name.
+ std::vector<const mojom::UkmEntry*> GetEntriesByName(
+ base::StringPiece entry_name) const;
+
+ // Gets the data for all entries with given entry name, merged to one entry
+ // for each source id. Intended for singular="true" metrics.
+ std::map<ukm::SourceId, mojom::UkmEntryPtr> GetMergedEntriesByName(
+ base::StringPiece entry_name) const;
+
+ // Checks if an entry is associated with a url.
+ void ExpectEntrySourceHasUrl(const mojom::UkmEntry* entry,
+ const GURL& url) const;
+
+ // Expects the value of a metric from an entry.
+ static void ExpectEntryMetric(const mojom::UkmEntry* entry,
+ base::StringPiece metric_name,
+ int64_t expected_value);
+
+ // Checks if an entry contains a specific metric.
+ static bool EntryHasMetric(const mojom::UkmEntry* entry,
+ base::StringPiece metric_name);
+
+ // Gets the value of a metric from an entry. Returns nullptr if the metric is
+ // not found.
+ static const int64_t* GetEntryMetric(const mojom::UkmEntry* entry,
+ base::StringPiece metric_name);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestUkmRecorder);
+};
+
+// Similar to a TestUkmRecorder, but also sets itself as the global UkmRecorder
+// on construction, and unsets itself on destruction.
+class TestAutoSetUkmRecorder : public TestUkmRecorder {
+ public:
+ TestAutoSetUkmRecorder();
+ ~TestAutoSetUkmRecorder() override;
+
+ private:
+ base::WeakPtrFactory<TestAutoSetUkmRecorder> self_ptr_factory_;
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_TEST_UKM_RECORDER_H_
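
A hedged sketch of how these accessors are typically used from a gtest; "PageLoad", "Timing", the URL, and the elided code under test are placeholders, not references to real event builders:

```cpp
#include "components/ukm/test_ukm_recorder.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace {

TEST(ExampleUkmTest, RecordsOneEntryPerSource) {
  // Registers itself with the DelegatingUkmRecorder for the test's lifetime.
  ukm::TestAutoSetUkmRecorder recorder;

  // ... drive the code under test so it emits one "PageLoad" entry ...

  // The assertions below assume the elided code emitted exactly one entry.
  const auto entries = recorder.GetEntriesByName("PageLoad");
  ASSERT_EQ(1u, entries.size());
  const ukm::mojom::UkmEntry* entry = entries[0];
  EXPECT_TRUE(ukm::TestUkmRecorder::EntryHasMetric(entry, "Timing"));
  ukm::TestUkmRecorder::ExpectEntryMetric(entry, "Timing", 123);
  recorder.ExpectEntrySourceHasUrl(entry, GURL("https://example.com/"));
}

}  // namespace
```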
diff --git a/components/ukm/ukm_pref_names.cc b/components/ukm/ukm_pref_names.cc
new file mode 100644
index 0000000..23ad637
--- /dev/null
+++ b/components/ukm/ukm_pref_names.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/ukm_pref_names.h"
+
+namespace ukm {
+namespace prefs {
+
+// A random uint64 value unique for each chrome install.
+const char kUkmClientId[] = "ukm.client_id";
+
+// Preference which stores serialized UKM logs to be uploaded.
+const char kUkmPersistedLogs[] = "ukm.persisted_logs";
+
+// Preference which stores the UKM session id.
+const char kUkmSessionId[] = "ukm.session_id";
+
+} // namespace prefs
+} // namespace ukm
diff --git a/components/ukm/ukm_pref_names.h b/components/ukm/ukm_pref_names.h
new file mode 100644
index 0000000..b0e4208
--- /dev/null
+++ b/components/ukm/ukm_pref_names.h
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_UKM_PREF_NAMES_H_
+#define COMPONENTS_UKM_UKM_PREF_NAMES_H_
+
+namespace ukm {
+namespace prefs {
+
+// Alphabetical list of preference names specific to the UKM
+// component. Keep alphabetized, and document each in the .cc file.
+extern const char kUkmClientId[];
+extern const char kUkmPersistedLogs[];
+extern const char kUkmSessionId[];
+
+} // namespace prefs
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_UKM_PREF_NAMES_H_
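
A hedged sketch of how these three prefs are typically registered; it mirrors the RegisterListPref call for kUkmPersistedLogs that appears later in this patch, while the Uint64/Int64 registration calls and zero defaults for the other two are assumptions:

```cpp
#include "components/prefs/pref_registry_simple.h"
#include "components/ukm/ukm_pref_names.h"

namespace ukm {

// Sketch only; the defaults below are assumptions, not values from the patch.
void RegisterUkmPrefsForExample(PrefRegistrySimple* registry) {
  registry->RegisterUint64Pref(prefs::kUkmClientId, 0);
  registry->RegisterInt64Pref(prefs::kUkmSessionId, 0);
  registry->RegisterListPref(prefs::kUkmPersistedLogs);
}

}  // namespace ukm
```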
diff --git a/components/ukm/ukm_recorder_impl.cc b/components/ukm/ukm_recorder_impl.cc
new file mode 100644
index 0000000..2f0dc35
--- /dev/null
+++ b/components/ukm/ukm_recorder_impl.cc
@@ -0,0 +1,606 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/ukm_recorder_impl.h"
+
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/feature_list.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "components/variations/variations_associated_data.h"
+#include "services/metrics/public/cpp/ukm_decode.h"
+#include "services/metrics/public/cpp/ukm_source.h"
+#include "services/metrics/public/cpp/ukm_source_id.h"
+#include "third_party/metrics_proto/ukm/entry.pb.h"
+#include "third_party/metrics_proto/ukm/report.pb.h"
+#include "third_party/metrics_proto/ukm/source.pb.h"
+#include "url/gurl.h"
+
+namespace ukm {
+
+namespace {
+
+// Note: kChromeUIScheme is defined in content, which this code can't
+// depend on - since it's used by iOS too. kExtensionScheme is defined
+// in extensions which also isn't always available here. kAppScheme
+// will be defined in code that isn't available here.
+const char kChromeUIScheme[] = "chrome";
+const char kExtensionScheme[] = "chrome-extension";
+const char kAppScheme[] = "app";
+
+const base::Feature kUkmSamplingRateFeature{"UkmSamplingRate",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+
+// Gets the list of whitelisted Entries as string. Format is a comma separated
+// list of Entry names (as strings).
+std::string GetWhitelistEntries() {
+ return base::GetFieldTrialParamValueByFeature(kUkmFeature,
+ "WhitelistEntries");
+}
+
+bool IsWhitelistedSourceId(SourceId source_id) {
+ return GetSourceIdType(source_id) == SourceIdType::NAVIGATION_ID ||
+ GetSourceIdType(source_id) == SourceIdType::APP_ID;
+}
+
+// Gets the maximum number of Sources we'll keep in memory before discarding any
+// new ones being added.
+size_t GetMaxSources() {
+ constexpr size_t kDefaultMaxSources = 500;
+ return static_cast<size_t>(base::GetFieldTrialParamByFeatureAsInt(
+ kUkmFeature, "MaxSources", kDefaultMaxSources));
+}
+
+// Gets the maximum number of unreferenced Sources kept after purging sources
+// that were added to the log.
+size_t GetMaxKeptSources() {
+ constexpr size_t kDefaultMaxKeptSources = 100;
+ return static_cast<size_t>(base::GetFieldTrialParamByFeatureAsInt(
+ kUkmFeature, "MaxKeptSources", kDefaultMaxKeptSources));
+}
+
+// Gets the maximum number of Entries we'll keep in memory before discarding any
+// new ones being added.
+size_t GetMaxEntries() {
+ constexpr size_t kDefaultMaxEntries = 5000;
+ return static_cast<size_t>(base::GetFieldTrialParamByFeatureAsInt(
+ kUkmFeature, "MaxEntries", kDefaultMaxEntries));
+}
+
+// Returns whether |url| has one of the schemes supported for logging to UKM.
+// URLs with other schemes will not be logged.
+bool HasSupportedScheme(const GURL& url) {
+ return url.SchemeIsHTTPOrHTTPS() || url.SchemeIs(url::kFtpScheme) ||
+ url.SchemeIs(url::kAboutScheme) || url.SchemeIs(kChromeUIScheme) ||
+ url.SchemeIs(kExtensionScheme) || url.SchemeIs(kAppScheme);
+}
+
+// True if we should record the initial_url field of the UKM Source proto.
+bool ShouldRecordInitialUrl() {
+ return base::GetFieldTrialParamByFeatureAsBool(kUkmFeature,
+ "RecordInitialUrl", false);
+}
+
+enum class DroppedDataReason {
+ NOT_DROPPED = 0,
+ RECORDING_DISABLED = 1,
+ MAX_HIT = 2,
+ NOT_WHITELISTED = 3,
+ UNSUPPORTED_URL_SCHEME = 4,
+ SAMPLED_OUT = 5,
+ EXTENSION_URLS_DISABLED = 6,
+ EXTENSION_NOT_SYNCED = 7,
+ NOT_MATCHED = 8,
+ EMPTY_URL = 9,
+ NUM_DROPPED_DATA_REASONS
+};
+
+void RecordDroppedSource(DroppedDataReason reason) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "UKM.Sources.Dropped", static_cast<int>(reason),
+ static_cast<int>(DroppedDataReason::NUM_DROPPED_DATA_REASONS));
+}
+
+void RecordDroppedEntry(DroppedDataReason reason) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "UKM.Entries.Dropped", static_cast<int>(reason),
+ static_cast<int>(DroppedDataReason::NUM_DROPPED_DATA_REASONS));
+}
+
+void StoreEntryProto(const mojom::UkmEntry& in, Entry* out) {
+ DCHECK(!out->has_source_id());
+ DCHECK(!out->has_event_hash());
+
+ out->set_source_id(in.source_id);
+ out->set_event_hash(in.event_hash);
+ for (const auto& metric : in.metrics) {
+ Entry::Metric* proto_metric = out->add_metrics();
+ proto_metric->set_metric_hash(metric.first);
+ proto_metric->set_value(metric.second);
+ }
+}
+
+GURL SanitizeURL(const GURL& url) {
+ GURL::Replacements remove_params;
+ remove_params.ClearUsername();
+ remove_params.ClearPassword();
+ // chrome:// and about: URL params are never used for navigation, only to
+ // prepopulate data on the page, so don't include their params.
+ if (url.SchemeIs(url::kAboutScheme) || url.SchemeIs("chrome")) {
+ remove_params.ClearQuery();
+ }
+ if (url.SchemeIs(kExtensionScheme)) {
+ remove_params.ClearPath();
+ remove_params.ClearQuery();
+ remove_params.ClearRef();
+ }
+ return url.ReplaceComponents(remove_params);
+}
+
+void AppendWhitelistedUrls(
+ const std::map<SourceId, std::unique_ptr<UkmSource>>& sources,
+ std::unordered_set<std::string>* urls) {
+ for (const auto& kv : sources) {
+ if (IsWhitelistedSourceId(kv.first)) {
+ urls->insert(kv.second->url().spec());
+ // Some non-navigation sources only record origin as a URL.
+ // Add the origin from the navigation source to match those too.
+ urls->insert(kv.second->url().GetOrigin().spec());
+ }
+ }
+}
+
+bool HasUnknownMetrics(const ukm::builders::DecodeMap& decode_map,
+ const mojom::UkmEntry& entry) {
+ const auto it = decode_map.find(entry.event_hash);
+ if (it == decode_map.end())
+ return true;
+ const auto& metric_map = it->second.metric_map;
+ for (const auto& metric : entry.metrics) {
+ if (metric_map.count(metric.first) == 0)
+ return true;
+ }
+ return false;
+}
+
+} // namespace
+
+UkmRecorderImpl::UkmRecorderImpl() : recording_enabled_(false) {}
+UkmRecorderImpl::~UkmRecorderImpl() = default;
+
+// static
+void UkmRecorderImpl::CreateFallbackSamplingTrial(
+ bool is_stable_channel,
+ base::FeatureList* feature_list) {
+ static const char kSampledGroup_Stable[] = "Sampled_NoSeed_Stable";
+ static const char kSampledGroup_Other[] = "Sampled_NoSeed_Other";
+ const char* sampled_group = kSampledGroup_Other;
+ int default_sampling = 1; // Sampling is 1-in-N; this is N.
+
+ // Nothing is sampled out except for "stable" which omits almost everything
+ // in this configuration. This is done so that clients that fail to receive
+ // a configuration from the server do not bias aggregated results because
+ // of a relatively large number of records from them.
+ if (is_stable_channel) {
+ sampled_group = kSampledGroup_Stable;
+ default_sampling = 1000000;
+ }
+
+ scoped_refptr<base::FieldTrial> trial(
+ base::FieldTrialList::FactoryGetFieldTrial(
+ kUkmSamplingRateFeature.name, 100, sampled_group,
+ base::FieldTrialList::kNoExpirationYear, 1, 1,
+ base::FieldTrial::ONE_TIME_RANDOMIZED, nullptr));
+
+ // Everybody (100%) should have a sampling configuration.
+ std::map<std::string, std::string> params = {
+ {"_default_sampling", base::IntToString(default_sampling)}};
+ variations::AssociateVariationParams(trial->trial_name(), sampled_group,
+ params);
+ trial->AppendGroup(sampled_group, 100);
+
+ // Setup the feature.
+ feature_list->RegisterFieldTrialOverride(
+ kUkmSamplingRateFeature.name, base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ trial.get());
+}
+
+UkmRecorderImpl::EventAggregate::EventAggregate() = default;
+UkmRecorderImpl::EventAggregate::~EventAggregate() = default;
+
+UkmRecorderImpl::Recordings::Recordings() = default;
+UkmRecorderImpl::Recordings& UkmRecorderImpl::Recordings::operator=(
+ Recordings&&) = default;
+UkmRecorderImpl::Recordings::~Recordings() = default;
+
+void UkmRecorderImpl::Recordings::Reset() {
+ *this = Recordings();
+}
+
+void UkmRecorderImpl::Recordings::SourceCounts::Reset() {
+ *this = SourceCounts();
+}
+
+void UkmRecorderImpl::EnableRecording(bool extensions) {
+ DVLOG(1) << "UkmRecorderImpl::EnableRecording, extensions=" << extensions;
+ recording_enabled_ = true;
+ extensions_enabled_ = extensions;
+}
+
+void UkmRecorderImpl::DisableRecording() {
+ DVLOG(1) << "UkmRecorderImpl::DisableRecording";
+ recording_enabled_ = false;
+ extensions_enabled_ = false;
+}
+
+void UkmRecorderImpl::DisableSamplingForTesting() {
+ sampling_enabled_ = false;
+}
+
+void UkmRecorderImpl::Purge() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ recordings_.Reset();
+}
+
+void UkmRecorderImpl::SetIsWebstoreExtensionCallback(
+ const IsWebstoreExtensionCallback& callback) {
+ is_webstore_extension_callback_ = callback;
+}
+
+void UkmRecorderImpl::StoreRecordingsInReport(Report* report) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ std::set<SourceId> ids_seen;
+ for (const auto& entry : recordings_.entries) {
+ Entry* proto_entry = report->add_entries();
+ StoreEntryProto(*entry, proto_entry);
+ ids_seen.insert(entry->source_id);
+ }
+
+ std::unordered_set<std::string> url_whitelist;
+ recordings_.carryover_urls_whitelist.swap(url_whitelist);
+ AppendWhitelistedUrls(recordings_.sources, &url_whitelist);
+
+ std::vector<std::unique_ptr<UkmSource>> unsent_sources;
+ int unmatched_sources = 0;
+ for (auto& kv : recordings_.sources) {
+ // If the source id is not whitelisted, don't send it unless it has
+ // associated entries and the URL matches a URL of a whitelisted source.
+ // Note: If ShouldRestrictToWhitelistedSourceIds() is true, this logic will
+ // not be hit as the source would have already been filtered in
+ // UpdateSourceURL().
+ if (!IsWhitelistedSourceId(kv.first)) {
+ // UkmSource should not keep initial_url for non-navigation source IDs.
+ DCHECK_EQ(1u, kv.second->urls().size());
+ if (!url_whitelist.count(kv.second->url().spec())) {
+ RecordDroppedSource(DroppedDataReason::NOT_MATCHED);
+ unmatched_sources++;
+ continue;
+ }
+ if (!base::ContainsKey(ids_seen, kv.first)) {
+ unsent_sources.push_back(std::move(kv.second));
+ continue;
+ }
+ }
+ Source* proto_source = report->add_sources();
+ kv.second->PopulateProto(proto_source);
+ if (!ShouldRecordInitialUrl())
+ proto_source->clear_initial_url();
+ }
+ for (const auto& event_and_aggregate : recordings_.event_aggregations) {
+ if (event_and_aggregate.second.metrics.empty())
+ continue;
+ const EventAggregate& event_aggregate = event_and_aggregate.second;
+ Aggregate* proto_aggregate = report->add_aggregates();
+ proto_aggregate->set_source_id(0); // Across all sources.
+ proto_aggregate->set_event_hash(event_and_aggregate.first);
+ proto_aggregate->set_total_count(event_aggregate.total_count);
+ proto_aggregate->set_dropped_due_to_limits(
+ event_aggregate.dropped_due_to_limits);
+ proto_aggregate->set_dropped_due_to_sampling(
+ event_aggregate.dropped_due_to_sampling);
+ proto_aggregate->set_dropped_due_to_whitelist(
+ event_aggregate.dropped_due_to_whitelist);
+ for (const auto& metric_and_aggregate : event_aggregate.metrics) {
+ const MetricAggregate& aggregate = metric_and_aggregate.second;
+ Aggregate::Metric* proto_metric = proto_aggregate->add_metrics();
+ proto_metric->set_metric_hash(metric_and_aggregate.first);
+ proto_metric->set_value_sum(aggregate.value_sum);
+ proto_metric->set_value_square_sum(aggregate.value_square_sum);
+ if (aggregate.total_count != event_aggregate.total_count) {
+ proto_metric->set_total_count(aggregate.total_count);
+ }
+ if (aggregate.dropped_due_to_limits !=
+ event_aggregate.dropped_due_to_limits) {
+ proto_metric->set_dropped_due_to_limits(
+ aggregate.dropped_due_to_limits);
+ }
+ if (aggregate.dropped_due_to_sampling !=
+ event_aggregate.dropped_due_to_sampling) {
+ proto_metric->set_dropped_due_to_sampling(
+ aggregate.dropped_due_to_sampling);
+ }
+ if (aggregate.dropped_due_to_whitelist !=
+ event_aggregate.dropped_due_to_whitelist) {
+ proto_metric->set_dropped_due_to_whitelist(
+ aggregate.dropped_due_to_whitelist);
+ }
+ }
+ }
+
+ UMA_HISTOGRAM_COUNTS_1000("UKM.Sources.SerializedCount",
+ recordings_.sources.size() - unsent_sources.size());
+ UMA_HISTOGRAM_COUNTS_100000("UKM.Entries.SerializedCount2",
+ recordings_.entries.size());
+ UMA_HISTOGRAM_COUNTS_1000("UKM.Sources.UnsentSourcesCount",
+ unsent_sources.size());
+
+ Report::SourceCounts* source_counts_proto = report->mutable_source_counts();
+ source_counts_proto->set_observed(recordings_.source_counts.observed);
+ source_counts_proto->set_navigation_sources(
+ recordings_.source_counts.navigation_sources);
+ source_counts_proto->set_unmatched_sources(unmatched_sources);
+ source_counts_proto->set_deferred_sources(unsent_sources.size());
+ source_counts_proto->set_carryover_sources(
+ recordings_.source_counts.carryover_sources);
+
+ recordings_.sources.clear();
+ recordings_.source_counts.Reset();
+ recordings_.entries.clear();
+ recordings_.event_aggregations.clear();
+
+ // Keep at most |max_kept_sources|, prioritizing most-recent entries (by
+ // creation time).
+ const size_t max_kept_sources = GetMaxKeptSources();
+ if (unsent_sources.size() > max_kept_sources) {
+ std::nth_element(unsent_sources.begin(),
+ unsent_sources.begin() + max_kept_sources,
+ unsent_sources.end(),
+ [](const std::unique_ptr<ukm::UkmSource>& lhs,
+ const std::unique_ptr<ukm::UkmSource>& rhs) {
+ return lhs->creation_time() > rhs->creation_time();
+ });
+ unsent_sources.resize(max_kept_sources);
+ }
+
+ for (auto& source : unsent_sources) {
+ // We already matched these sources against the URL whitelist.
+ // Re-whitelist them for the next report.
+ recordings_.carryover_urls_whitelist.insert(source->url().spec());
+ recordings_.sources.emplace(source->id(), std::move(source));
+ }
+ UMA_HISTOGRAM_COUNTS_1000("UKM.Sources.KeptSourcesCount",
+ recordings_.sources.size());
+ recordings_.source_counts.carryover_sources = recordings_.sources.size();
+}
+
+bool UkmRecorderImpl::ShouldRestrictToWhitelistedSourceIds() const {
+ return base::GetFieldTrialParamByFeatureAsBool(
+ kUkmFeature, "RestrictToWhitelistedSourceIds", false);
+}
+
+bool UkmRecorderImpl::ShouldRestrictToWhitelistedEntries() const {
+ return true;
+}
+
+void UkmRecorderImpl::UpdateSourceURL(SourceId source_id,
+ const GURL& unsanitized_url) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ const GURL sanitized_url = SanitizeURL(unsanitized_url);
+ if (!ShouldRecordUrl(source_id, sanitized_url))
+ return;
+
+ // TODO(csharrison): These checks can probably move to ShouldRecordUrl.
+
+ if (base::ContainsKey(recordings_.sources, source_id))
+ return;
+
+ if (recordings_.sources.size() >= GetMaxSources()) {
+ RecordDroppedSource(DroppedDataReason::MAX_HIT);
+ return;
+ }
+ RecordSource(std::make_unique<UkmSource>(source_id, sanitized_url));
+}
+
+void UkmRecorderImpl::UpdateAppURL(SourceId source_id, const GURL& url) {
+ if (!extensions_enabled_) {
+ RecordDroppedSource(DroppedDataReason::EXTENSION_URLS_DISABLED);
+ return;
+ }
+ UpdateSourceURL(source_id, url);
+}
+
+void UkmRecorderImpl::RecordNavigation(
+ SourceId source_id,
+ const UkmSource::NavigationData& unsanitized_navigation_data) {
+ DCHECK(GetSourceIdType(source_id) == SourceIdType::NAVIGATION_ID);
+ // TODO(csharrison): Consider changing this behavior so the Source isn't even
+ // recorded at all if the final URL in |unsanitized_navigation_data| should
+ // not be recorded.
+ std::vector<GURL> urls;
+ for (const GURL& url : unsanitized_navigation_data.urls) {
+ const GURL sanitized_url = SanitizeURL(url);
+ if (ShouldRecordUrl(source_id, sanitized_url))
+ urls.push_back(std::move(sanitized_url));
+ }
+
+ // None of the URLs passed the ShouldRecordUrl check, so do not create a new
+ // Source for them.
+ if (urls.empty())
+ return;
+
+ UkmSource::NavigationData sanitized_navigation_data =
+ unsanitized_navigation_data.CopyWithSanitizedUrls(urls);
+ // TODO(csharrison): This check can probably move to ShouldRecordUrl.
+ DCHECK(!base::ContainsKey(recordings_.sources, source_id));
+ if (recordings_.sources.size() >= GetMaxSources()) {
+ RecordDroppedSource(DroppedDataReason::MAX_HIT);
+ return;
+ }
+ RecordSource(
+ std::make_unique<UkmSource>(source_id, sanitized_navigation_data));
+}
+
+bool UkmRecorderImpl::ShouldRecordUrl(SourceId source_id,
+ const GURL& sanitized_url) const {
+ if (!recording_enabled_) {
+ RecordDroppedSource(DroppedDataReason::RECORDING_DISABLED);
+ return false;
+ }
+
+ if (ShouldRestrictToWhitelistedSourceIds() &&
+ !IsWhitelistedSourceId(source_id)) {
+ RecordDroppedSource(DroppedDataReason::NOT_WHITELISTED);
+ return false;
+ }
+
+ if (sanitized_url.is_empty()) {
+ RecordDroppedSource(DroppedDataReason::EMPTY_URL);
+ return false;
+ }
+
+ if (!HasSupportedScheme(sanitized_url)) {
+ RecordDroppedSource(DroppedDataReason::UNSUPPORTED_URL_SCHEME);
+ DVLOG(2) << "Dropped Unsupported UKM URL:" << source_id << ":"
+ << sanitized_url.spec();
+ return false;
+ }
+
+ // Extension URLs need to be specifically enabled and the extension synced.
+ if (sanitized_url.SchemeIs(kExtensionScheme)) {
+ DCHECK_EQ(sanitized_url.GetWithEmptyPath(), sanitized_url);
+ if (!extensions_enabled_) {
+ RecordDroppedSource(DroppedDataReason::EXTENSION_URLS_DISABLED);
+ return false;
+ }
+ if (!is_webstore_extension_callback_ ||
+ !is_webstore_extension_callback_.Run(sanitized_url.host_piece())) {
+ RecordDroppedSource(DroppedDataReason::EXTENSION_NOT_SYNCED);
+ return false;
+ }
+ }
+ return true;
+}
+
+void UkmRecorderImpl::RecordSource(std::unique_ptr<UkmSource> source) {
+ SourceId source_id = source->id();
+ if (GetSourceIdType(source_id) == SourceIdType::NAVIGATION_ID)
+ recordings_.source_counts.navigation_sources++;
+ recordings_.source_counts.observed++;
+ recordings_.sources.emplace(source_id, std::move(source));
+}
+
+void UkmRecorderImpl::AddEntry(mojom::UkmEntryPtr entry) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ DCHECK(!HasUnknownMetrics(decode_map_, *entry));
+
+ if (!recording_enabled_) {
+ RecordDroppedEntry(DroppedDataReason::RECORDING_DISABLED);
+ return;
+ }
+
+ EventAggregate& event_aggregate =
+ recordings_.event_aggregations[entry->event_hash];
+ event_aggregate.total_count++;
+ for (const auto& metric : entry->metrics) {
+ MetricAggregate& aggregate = event_aggregate.metrics[metric.first];
+ double value = metric.second;
+ aggregate.total_count++;
+ aggregate.value_sum += value;
+ aggregate.value_square_sum += value * value;
+ }
+
+ if (ShouldRestrictToWhitelistedEntries() &&
+ !base::ContainsKey(whitelisted_entry_hashes_, entry->event_hash)) {
+ RecordDroppedEntry(DroppedDataReason::NOT_WHITELISTED);
+ event_aggregate.dropped_due_to_whitelist++;
+ for (auto& metric : entry->metrics)
+ event_aggregate.metrics[metric.first].dropped_due_to_whitelist++;
+ return;
+ }
+
+ if (default_sampling_rate_ == 0)
+ LoadExperimentSamplingInfo();
+
+ auto found = event_sampling_rates_.find(entry->event_hash);
+ int sampling_rate = (found != event_sampling_rates_.end())
+ ? found->second
+ : default_sampling_rate_;
+ if (sampling_enabled_ &&
+ (sampling_rate == 0 ||
+ (sampling_rate > 1 && base::RandInt(1, sampling_rate) != 1))) {
+ RecordDroppedEntry(DroppedDataReason::SAMPLED_OUT);
+ event_aggregate.dropped_due_to_sampling++;
+ for (auto& metric : entry->metrics)
+ event_aggregate.metrics[metric.first].dropped_due_to_sampling++;
+ return;
+ }
+
+ if (recordings_.entries.size() >= GetMaxEntries()) {
+ RecordDroppedEntry(DroppedDataReason::MAX_HIT);
+ event_aggregate.dropped_due_to_limits++;
+ for (auto& metric : entry->metrics)
+ event_aggregate.metrics[metric.first].dropped_due_to_limits++;
+ return;
+ }
+
+ recordings_.entries.push_back(std::move(entry));
+}
+
+void UkmRecorderImpl::LoadExperimentSamplingInfo() {
+ DCHECK_EQ(0, default_sampling_rate_);
+ std::map<std::string, std::string> params;
+
+ if (base::FeatureList::IsEnabled(kUkmSamplingRateFeature)) {
+ // Enabled may have various parameters to control sampling.
+ if (base::GetFieldTrialParamsByFeature(kUkmSamplingRateFeature, &params)) {
+ for (const auto& kv : params) {
+ const std::string& key = kv.first;
+ if (key.length() == 0)
+ continue;
+
+ // Keys starting with an underscore are global configuration.
+ if (key.at(0) == '_') {
+ if (key == "_default_sampling") {
+ int sampling;
+ if (base::StringToInt(kv.second, &sampling) && sampling >= 0)
+ default_sampling_rate_ = sampling;
+ }
+ continue;
+ }
+
+ // Anything else is an event name.
+ int sampling;
+ if (base::StringToInt(kv.second, &sampling) && sampling >= 0)
+ event_sampling_rates_[base::HashMetricName(key)] = sampling;
+ }
+ }
+ }
+
+ // Default rate must be >0 to indicate that load is complete.
+ if (default_sampling_rate_ == 0)
+ default_sampling_rate_ = 1;
+}
+
+void UkmRecorderImpl::StoreWhitelistedEntries() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ const auto entries =
+ base::SplitString(GetWhitelistEntries(), ",", base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ for (const auto& entry_string : entries)
+ whitelisted_entry_hashes_.insert(base::HashMetricName(entry_string));
+ decode_map_ = ::ukm::builders::CreateDecodeMap();
+}
+
+} // namespace ukm
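
AddEntry() treats a sampling rate N as "keep 1 in N": 0 drops every entry, 1 keeps every entry, and larger values keep an entry only when a random draw in [1, N] lands on 1. A standalone sketch of that decision using the standard library in place of base::RandInt:

```cpp
#include <iostream>
#include <random>

// Mirrors the sampling condition in AddEntry(): drop when the rate is 0, or
// when the rate is > 1 and the 1-in-N draw misses.
bool KeepEntry(int sampling_rate, std::mt19937& rng) {
  if (sampling_rate == 0)
    return false;  // Sampled out entirely.
  if (sampling_rate == 1)
    return true;   // No sampling.
  std::uniform_int_distribution<int> draw(1, sampling_rate);
  return draw(rng) == 1;
}

int main() {
  std::mt19937 rng(42);
  int kept = 0;
  for (int i = 0; i < 100000; ++i)
    kept += KeepEntry(/*sampling_rate=*/4, rng);
  std::cout << kept << " of 100000 kept (expect roughly 25000)\n";
  return 0;
}
```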
diff --git a/components/ukm/ukm_recorder_impl.h b/components/ukm/ukm_recorder_impl.h
new file mode 100644
index 0000000..7f3084f
--- /dev/null
+++ b/components/ukm/ukm_recorder_impl.h
@@ -0,0 +1,201 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_UKM_RECORDER_IMPL_H_
+#define COMPONENTS_UKM_UKM_RECORDER_IMPL_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/containers/flat_map.h"
+#include "base/sequence_checker.h"
+#include "base/strings/string_piece.h"
+#include "services/metrics/public/cpp/ukm_decode.h"
+#include "services/metrics/public/cpp/ukm_recorder.h"
+#include "services/metrics/public/mojom/ukm_interface.mojom.h"
+
+namespace metrics {
+class UkmBrowserTestBase;
+class UkmEGTestHelper;
+}
+
+namespace ukm {
+class Report;
+class UkmSource;
+class UkmUtilsForTest;
+
+namespace debug {
+class UkmDebugDataExtractor;
+}
+
+class UkmRecorderImpl : public UkmRecorder {
+ using IsWebstoreExtensionCallback =
+ base::RepeatingCallback<bool(base::StringPiece id)>;
+
+ public:
+ UkmRecorderImpl();
+ ~UkmRecorderImpl() override;
+
+ // Unconditionally attempts to create a field trial to control client side
+ // metrics/crash sampling to use as a fallback when one hasn't been
+ // provided. This is expected to occur on first-run on platforms that don't
+ // have first-run variations support. This should only be called when there is
+ // no existing field trial controlling the sampling feature.
+ static void CreateFallbackSamplingTrial(bool is_stable_channel,
+ base::FeatureList* feature_list);
+
+ // Enables/disables recording; recording should only be enabled when data
+ // collection is allowed. The |extensions| flag separately controls recording
+ // of chrome-extension:// URLs; this flag should reflect the "sync extensions"
+ // user setting.
+ void EnableRecording(bool extensions);
+ void DisableRecording();
+
+ // Disables sampling for testing purposes.
+ void DisableSamplingForTesting() override;
+
+ // Deletes stored recordings.
+ void Purge();
+
+ // Sets a callback for determining if an extension URL can be recorded.
+ void SetIsWebstoreExtensionCallback(
+ const IsWebstoreExtensionCallback& callback);
+
+ protected:
+ // Cache the list of whitelisted entries from the field trial parameter.
+ void StoreWhitelistedEntries();
+
+ // Writes recordings into a report proto, and clears recordings.
+ void StoreRecordingsInReport(Report* report);
+
+ const std::map<SourceId, std::unique_ptr<UkmSource>>& sources() const {
+ return recordings_.sources;
+ }
+
+ const std::vector<mojom::UkmEntryPtr>& entries() const {
+ return recordings_.entries;
+ }
+
+ // UkmRecorder:
+ void UpdateSourceURL(SourceId source_id, const GURL& url) override;
+ void UpdateAppURL(SourceId source_id, const GURL& url) override;
+ void RecordNavigation(
+ SourceId source_id,
+ const UkmSource::NavigationData& navigation_data) override;
+ using UkmRecorder::RecordOtherURL;
+
+ virtual bool ShouldRestrictToWhitelistedSourceIds() const;
+
+ virtual bool ShouldRestrictToWhitelistedEntries() const;
+
+ private:
+ friend ::metrics::UkmBrowserTestBase;
+ friend ::metrics::UkmEGTestHelper;
+ friend ::ukm::debug::UkmDebugDataExtractor;
+ friend ::ukm::UkmUtilsForTest;
+
+ struct MetricAggregate {
+ uint64_t total_count = 0;
+ double value_sum = 0;
+ double value_square_sum = 0.0;
+ uint64_t dropped_due_to_limits = 0;
+ uint64_t dropped_due_to_sampling = 0;
+ uint64_t dropped_due_to_whitelist = 0;
+ };
+
+ struct EventAggregate {
+ EventAggregate();
+ ~EventAggregate();
+
+ base::flat_map<uint64_t, MetricAggregate> metrics;
+ uint64_t total_count = 0;
+ uint64_t dropped_due_to_limits = 0;
+ uint64_t dropped_due_to_sampling = 0;
+ uint64_t dropped_due_to_whitelist = 0;
+ };
+
+ using MetricAggregateMap = std::map<uint64_t, MetricAggregate>;
+
+ // Returns true if |sanitized_url| should be recorded.
+ bool ShouldRecordUrl(SourceId source_id, const GURL& sanitized_url) const;
+
+ void RecordSource(std::unique_ptr<UkmSource> source);
+
+ void AddEntry(mojom::UkmEntryPtr entry) override;
+
+ // Load sampling configurations from field-trial information.
+ void LoadExperimentSamplingInfo();
+
+ // Whether recording new data is currently allowed.
+ bool recording_enabled_ = false;
+
+ // Indicates whether recording is enabled for extensions.
+ bool extensions_enabled_ = false;
+
+ // Indicates if sampling has been enabled.
+ bool sampling_enabled_ = true;
+
+ // Callback for checking extension IDs.
+ IsWebstoreExtensionCallback is_webstore_extension_callback_;
+
+ // Map from hashes to entry and metric names.
+ ukm::builders::DecodeMap decode_map_;
+
+ // Whitelisted Entry hashes, only the ones in this set will be recorded.
+ std::set<uint64_t> whitelisted_entry_hashes_;
+
+ // Sampling configurations, loaded from a field-trial.
+ int default_sampling_rate_ = 0;
+ base::flat_map<uint64_t, int> event_sampling_rates_;
+
+ // Contains data from various recordings which periodically get serialized
+ // and cleared by StoreRecordingsInReport() and may be Purged().
+ struct Recordings {
+ Recordings();
+ Recordings& operator=(Recordings&&);
+ ~Recordings();
+
+ // Data captured by UpdateSourceURL().
+ std::map<SourceId, std::unique_ptr<UkmSource>> sources;
+
+ // Data captured by AddEntry().
+ std::vector<mojom::UkmEntryPtr> entries;
+
+ // URLs of sources that matched a whitelisted URL but were not included in
+ // the report generated by the last log rotation because we haven't seen any
+ // events for that source yet.
+ std::unordered_set<std::string> carryover_urls_whitelist;
+
+ // Aggregate information for collected event metrics.
+ std::map<uint64_t, EventAggregate> event_aggregations;
+
+ // Aggregated counters about Sources recorded in the current log.
+ struct SourceCounts {
+ // Count of URLs recorded for all sources.
+ size_t observed = 0;
+ // Count of URLs recorded for all SourceIdType::NAVIGATION_ID Sources.
+ size_t navigation_sources = 0;
+ // Sources carried over (not recorded) from a previous logging rotation.
+ size_t carryover_sources = 0;
+
+ // Resets all of the data.
+ void Reset();
+ };
+ SourceCounts source_counts;
+
+ // Resets all of the data.
+ void Reset();
+ };
+ Recordings recordings_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_UKM_RECORDER_IMPL_H_
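The header above only declares the sampling state (|default_sampling_rate_| and per-event |event_sampling_rates_|); the decision logic lives elsewhere. As a rough, self-contained illustration of how a 1-in-N keep/drop decision could be derived from those two fields, here is a minimal standalone sketch. It is not code from this import: the function name, the <random> usage, and the interpretation that a rate of 0 drops everything while a rate of 1 keeps everything are assumptions for illustration only.

// Illustrative sketch only; not part of the diff above.
#include <cstdint>
#include <map>
#include <random>

bool ShouldKeepEvent(uint64_t event_hash,
                     int default_rate,
                     const std::map<uint64_t, int>& event_rates) {
  const auto it = event_rates.find(event_hash);
  const int rate = (it != event_rates.end()) ? it->second : default_rate;
  if (rate <= 0)
    return false;  // Assumed meaning: sampling disabled, drop the event.
  if (rate == 1)
    return true;   // Assumed meaning: no sampling, keep every event.
  static std::mt19937_64 rng{std::random_device{}()};
  std::uniform_int_distribution<int> dist(1, rate);
  return dist(rng) == 1;  // Keep roughly 1 out of every |rate| events.
}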
diff --git a/components/ukm/ukm_reporting_service.cc b/components/ukm/ukm_reporting_service.cc
new file mode 100644
index 0000000..cf2b1c8
--- /dev/null
+++ b/components/ukm/ukm_reporting_service.cc
@@ -0,0 +1,112 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ReportingService specialized to report UKM metrics.
+
+#include "components/ukm/ukm_reporting_service.h"
+
+#include <memory>
+
+#include "base/metrics/field_trial_params.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/ukm/persisted_logs_metrics_impl.h"
+#include "components/ukm/ukm_pref_names.h"
+#include "components/ukm/ukm_service.h"
+
+namespace ukm {
+
+namespace {
+
+// The MIME type used when uploading UKM logs.
+constexpr char kMimeType[] = "application/vnd.chrome.ukm";
+
+// The number of UKM logs that will be stored in PersistedLogs before logs
+// start being dropped.
+constexpr int kMinPersistedLogs = 8;
+
+// The number of bytes of UKM logs that will be stored in PersistedLogs before
+// logs start being dropped.
+// This ensures that a reasonable amount of history will be stored even if there
+// is a long series of very small logs.
+constexpr int kMinPersistedBytes = 300000;
+
+// If an upload fails, and the transmission was over this byte count, then we
+// will discard the log, and not try to retransmit it. We also don't persist
+// the log to the prefs for transmission during the next chrome session if this
+// limit is exceeded.
+constexpr size_t kMaxLogRetransmitSize = 100 * 1024;
+
+std::string GetServerUrl() {
+ constexpr char kDefaultServerUrl[] = "https://clients4.google.com/ukm";
+ std::string server_url =
+ base::GetFieldTrialParamValueByFeature(kUkmFeature, "ServerUrl");
+ if (!server_url.empty())
+ return server_url;
+ return kDefaultServerUrl;
+}
+
+} // namespace
+
+// static
+void UkmReportingService::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterListPref(prefs::kUkmPersistedLogs);
+ // Base class already registered by MetricsReportingService::RegisterPrefs
+ // ReportingService::RegisterPrefs(registry);
+}
+
+UkmReportingService::UkmReportingService(metrics::MetricsServiceClient* client,
+ PrefService* local_state)
+ : ReportingService(client, local_state, kMaxLogRetransmitSize),
+ persisted_logs_(std::make_unique<ukm::PersistedLogsMetricsImpl>(),
+ local_state,
+ prefs::kUkmPersistedLogs,
+ kMinPersistedLogs,
+ kMinPersistedBytes,
+ kMaxLogRetransmitSize) {}
+
+UkmReportingService::~UkmReportingService() {}
+
+metrics::LogStore* UkmReportingService::log_store() {
+ return &persisted_logs_;
+}
+
+std::string UkmReportingService::GetUploadUrl() const {
+ return GetServerUrl();
+}
+
+std::string UkmReportingService::GetInsecureUploadUrl() const {
+ return "";
+}
+
+base::StringPiece UkmReportingService::upload_mime_type() const {
+ return kMimeType;
+}
+
+metrics::MetricsLogUploader::MetricServiceType
+UkmReportingService::service_type() const {
+ return metrics::MetricsLogUploader::UKM;
+}
+
+void UkmReportingService::LogCellularConstraint(bool upload_canceled) {
+ UMA_HISTOGRAM_BOOLEAN("UKM.LogUpload.Canceled.CellularConstraint",
+ upload_canceled);
+}
+
+void UkmReportingService::LogResponseOrErrorCode(int response_code,
+ int error_code,
+ bool was_https) {
+ // |was_https| is ignored since all UKM logs are received over HTTPS.
+ base::UmaHistogramSparse("UKM.LogUpload.ResponseOrErrorCode",
+ response_code >= 0 ? response_code : error_code);
+}
+
+void UkmReportingService::LogSuccess(size_t log_size) {
+ UMA_HISTOGRAM_COUNTS_10000("UKM.LogSize.OnSuccess", log_size / 1024);
+}
+
+void UkmReportingService::LogLargeRejection(size_t log_size) {}
+
+} // namespace ukm
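GetServerUrl() above prefers a "ServerUrl" field-trial parameter over the hard-coded default. A hedged sketch of how a test body could exercise that override, reusing only the variations and FeatureList calls that the unit tests later in this import already use; the trial name, group name, and endpoint URL are placeholders, not values from the patch.

// Sketch: inside a test body, associate a "ServerUrl" param with the UKM
// feature so GetServerUrl() returns the test endpoint instead of the default.
std::map<std::string, std::string> params = {
    {"ServerUrl", "https://test.example.com/ukm"}};
variations::AssociateVariationParams("TestTrial", "TestGroup", params);
base::FieldTrial* trial =
    base::FieldTrialList::CreateFieldTrial("TestTrial", "TestGroup");
auto feature_list = std::make_unique<base::FeatureList>();
feature_list->RegisterFieldTrialOverride(
    kUkmFeature.name, base::FeatureList::OVERRIDE_ENABLE_FEATURE, trial);
base::test::ScopedFeatureList scoped_list;
scoped_list.InitWithFeatureList(std::move(feature_list));
// GetServerUrl() now yields "https://test.example.com/ukm".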
diff --git a/components/ukm/ukm_reporting_service.h b/components/ukm/ukm_reporting_service.h
new file mode 100644
index 0000000..a805395
--- /dev/null
+++ b/components/ukm/ukm_reporting_service.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines a service that sends ukm logs to a server.
+
+#ifndef COMPONENTS_UKM_UKM_REPORTING_SERVICE_H_
+#define COMPONENTS_UKM_UKM_REPORTING_SERVICE_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/macros.h"
+#include "components/metrics/persisted_logs.h"
+#include "components/metrics/reporting_service.h"
+
+class PrefService;
+class PrefRegistrySimple;
+
+namespace metrics {
+class MetricsServiceClient;
+}
+
+namespace ukm {
+
+// A service that uploads logs to the UKM server.
+class UkmReportingService : public metrics::ReportingService {
+ public:
+ // Creates the UkmReportingService with the given |client|, and
+ // |local_state|. Does not take ownership of the parameters; instead stores
+ // a weak pointer to each. Caller should ensure that the parameters are valid
+ // for the lifetime of this class.
+ UkmReportingService(metrics::MetricsServiceClient* client,
+ PrefService* local_state);
+ ~UkmReportingService() override;
+
+ // At startup, this must be called to register all of the pref names and
+ // types we'll be using.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ metrics::PersistedLogs* ukm_log_store() { return &persisted_logs_; }
+ const metrics::PersistedLogs* ukm_log_store() const {
+ return &persisted_logs_;
+ }
+
+ private:
+ // metrics::ReportingService:
+ metrics::LogStore* log_store() override;
+ std::string GetUploadUrl() const override;
+ // Returns an empty string since retrying over HTTP is not enabled for UKM.
+ std::string GetInsecureUploadUrl() const override;
+ base::StringPiece upload_mime_type() const override;
+ metrics::MetricsLogUploader::MetricServiceType service_type() const override;
+ void LogCellularConstraint(bool upload_canceled) override;
+ void LogResponseOrErrorCode(int response_code,
+ int error_code,
+ bool was_https) override;
+ void LogSuccess(size_t log_size) override;
+ void LogLargeRejection(size_t log_size) override;
+
+ metrics::PersistedLogs persisted_logs_;
+
+ DISALLOW_COPY_AND_ASSIGN(UkmReportingService);
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_UKM_REPORTING_SERVICE_H_
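For orientation, the ukm_log_store() accessor above is what UkmService (later in this import) uses to persist, rotate, and purge logs. The calls below all appear in ukm_service.cc; they are gathered here only as a summary sketch, with |reporting| standing in for the reporting_service_ member.

// Summary sketch of the log-store calls UkmService makes in this import.
reporting.ukm_log_store()->StoreLog(serialized_log);  // BuildAndStoreLog()
reporting.ukm_log_store()->PersistUnsentLogs();       // Flush()
if (!reporting.ukm_log_store()->has_unsent_logs())    // RotateLog()
  BuildAndStoreLog();
reporting.ukm_log_store()->Purge();                   // Purge()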
diff --git a/components/ukm/ukm_rotation_scheduler.cc b/components/ukm/ukm_rotation_scheduler.cc
new file mode 100644
index 0000000..787ed1a
--- /dev/null
+++ b/components/ukm/ukm_rotation_scheduler.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/ukm_rotation_scheduler.h"
+
+#include "base/metrics/histogram_macros.h"
+
+namespace ukm {
+
+UkmRotationScheduler::UkmRotationScheduler(
+ const base::Closure& rotation_callback,
+ const base::Callback<base::TimeDelta(void)>& interval_callback)
+ : metrics::MetricsRotationScheduler(rotation_callback,
+ interval_callback) {}
+
+UkmRotationScheduler::~UkmRotationScheduler() = default;
+
+void UkmRotationScheduler::LogMetricsInitSequence(InitSequence sequence) {
+ UMA_HISTOGRAM_ENUMERATION("UKM.InitSequence", sequence,
+ INIT_SEQUENCE_ENUM_SIZE);
+}
+
+} // namespace ukm
diff --git a/components/ukm/ukm_rotation_scheduler.h b/components/ukm/ukm_rotation_scheduler.h
new file mode 100644
index 0000000..5c83527
--- /dev/null
+++ b/components/ukm/ukm_rotation_scheduler.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_UKM_ROTATION_SCHEDULER_H_
+#define COMPONENTS_UKM_UKM_ROTATION_SCHEDULER_H_
+
+#include "base/time/time.h"
+#include "components/metrics/metrics_rotation_scheduler.h"
+
+namespace ukm {
+
+// Scheduler to drive a UkmService object's log rotations.
+class UkmRotationScheduler : public metrics::MetricsRotationScheduler {
+ public:
+ // Creates a UkmRotationScheduler with |rotation_callback|, which is run when
+ // a log rotation should happen, and |interval_callback|, which supplies the
+ // interval between rotations in steady state.
+ UkmRotationScheduler(
+ const base::Closure& rotation_callback,
+ const base::Callback<base::TimeDelta(void)>& interval_callback);
+ ~UkmRotationScheduler() override;
+
+ private:
+ // Record the init sequence order histogram.
+ void LogMetricsInitSequence(InitSequence sequence) override;
+
+ DISALLOW_COPY_AND_ASSIGN(UkmRotationScheduler);
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_UKM_ROTATION_SCHEDULER_H_
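The two callbacks taken by the constructor above are the whole contract between the scheduler and its owner: one closure to trigger a rotation, one to report the steady-state interval. The snippet below is a language-level analogue only, using std::function and std::chrono in place of base::Callback and base::TimeDelta; the names are invented for illustration and this is not Chromium API.

#include <chrono>
#include <functional>

// Analogue of the rotation-scheduler contract; purely illustrative.
struct RotationHooks {
  std::function<void()> rotate;                    // run when a rotation is due
  std::function<std::chrono::seconds()> interval;  // steady-state spacing
};

void DriveOneRotation(const RotationHooks& hooks) {
  // A real scheduler waits hooks.interval() between calls and is told via
  // RotationFinished() when the owner is done; here we just invoke the
  // callback once to show the flow.
  hooks.rotate();
}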
diff --git a/components/ukm/ukm_service.cc b/components/ukm/ukm_service.cc
new file mode 100644
index 0000000..2f6b875
--- /dev/null
+++ b/components/ukm/ukm_service.cc
@@ -0,0 +1,258 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/ukm_service.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/feature_list.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/rand_util.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/time.h"
+#include "components/metrics/metrics_log.h"
+#include "components/metrics/metrics_service_client.h"
+#include "components/prefs/pref_registry_simple.h"
+#include "components/prefs/pref_service.h"
+#include "components/ukm/persisted_logs_metrics_impl.h"
+#include "components/ukm/ukm_pref_names.h"
+#include "components/ukm/ukm_rotation_scheduler.h"
+#include "services/metrics/public/cpp/delegating_ukm_recorder.h"
+#include "third_party/metrics_proto/ukm/report.pb.h"
+
+namespace ukm {
+
+namespace {
+
+// Generates a new client id and stores it in prefs.
+uint64_t GenerateClientId(PrefService* pref_service) {
+ uint64_t client_id = 0;
+ while (!client_id)
+ client_id = base::RandUint64();
+ pref_service->SetInt64(prefs::kUkmClientId, client_id);
+
+ // Also reset the session id counter.
+ pref_service->SetInteger(prefs::kUkmSessionId, 0);
+ return client_id;
+}
+
+uint64_t LoadOrGenerateClientId(PrefService* pref_service) {
+ uint64_t client_id = pref_service->GetInt64(prefs::kUkmClientId);
+ if (!client_id)
+ client_id = GenerateClientId(pref_service);
+ return client_id;
+}
+
+int32_t LoadSessionId(PrefService* pref_service) {
+ int32_t session_id = pref_service->GetInteger(prefs::kUkmSessionId);
+ ++session_id; // increment session id, once per session
+ pref_service->SetInteger(prefs::kUkmSessionId, session_id);
+ return session_id;
+}
+
+} // namespace
+
+UkmService::UkmService(PrefService* pref_service,
+ metrics::MetricsServiceClient* client,
+ bool restrict_to_whitelist_entries)
+ : pref_service_(pref_service),
+ restrict_to_whitelist_entries_(restrict_to_whitelist_entries),
+ client_id_(0),
+ session_id_(0),
+ report_count_(0),
+ client_(client),
+ reporting_service_(client, pref_service),
+ initialize_started_(false),
+ initialize_complete_(false),
+ self_ptr_factory_(this) {
+ DCHECK(pref_service_);
+ DCHECK(client_);
+ DVLOG(1) << "UkmService::Constructor";
+
+ reporting_service_.Initialize();
+
+ base::Closure rotate_callback =
+ base::Bind(&UkmService::RotateLog, self_ptr_factory_.GetWeakPtr());
+ // MetricsServiceClient outlives UkmService, and the rotation scheduler is
+ // tied to the lifetime of |this|.
+ const base::Callback<base::TimeDelta(void)>& get_upload_interval_callback =
+ base::Bind(&metrics::MetricsServiceClient::GetStandardUploadInterval,
+ base::Unretained(client_));
+ scheduler_.reset(new ukm::UkmRotationScheduler(rotate_callback,
+ get_upload_interval_callback));
+
+ StoreWhitelistedEntries();
+
+ DelegatingUkmRecorder::Get()->AddDelegate(self_ptr_factory_.GetWeakPtr());
+}
+
+UkmService::~UkmService() {
+ DisableReporting();
+ DelegatingUkmRecorder::Get()->RemoveDelegate(this);
+}
+
+void UkmService::Initialize() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!initialize_started_);
+ DVLOG(1) << "UkmService::Initialize";
+ initialize_started_ = true;
+
+ DCHECK_EQ(0, report_count_);
+ client_id_ = LoadOrGenerateClientId(pref_service_);
+ session_id_ = LoadSessionId(pref_service_);
+ metrics_providers_.Init();
+
+ StartInitTask();
+}
+
+void UkmService::EnableReporting() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::EnableReporting";
+ if (reporting_service_.reporting_active())
+ return;
+
+ metrics_providers_.OnRecordingEnabled();
+
+ if (!initialize_started_)
+ Initialize();
+ scheduler_->Start();
+ reporting_service_.EnableReporting();
+}
+
+void UkmService::DisableReporting() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::DisableReporting";
+
+ reporting_service_.DisableReporting();
+
+ metrics_providers_.OnRecordingDisabled();
+
+ scheduler_->Stop();
+ Flush();
+}
+
+#if defined(OS_ANDROID) || defined(OS_IOS)
+void UkmService::OnAppEnterForeground() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::OnAppEnterForeground";
+
+ // If initialize_started_ is false, UKM has not yet been started, so bail. The
+ // scheduler will instead be started via EnableReporting().
+ if (!initialize_started_)
+ return;
+
+ scheduler_->Start();
+}
+
+void UkmService::OnAppEnterBackground() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::OnAppEnterBackground";
+
+ if (!initialize_started_)
+ return;
+
+ scheduler_->Stop();
+
+ // Give providers a chance to persist ukm data as part of being backgrounded.
+ metrics_providers_.OnAppEnterBackground();
+
+ Flush();
+}
+#endif
+
+void UkmService::Flush() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (initialize_complete_)
+ BuildAndStoreLog();
+ reporting_service_.ukm_log_store()->PersistUnsentLogs();
+}
+
+void UkmService::Purge() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::Purge";
+ reporting_service_.ukm_log_store()->Purge();
+ UkmRecorderImpl::Purge();
+}
+
+// TODO(bmcquade): rename this to something more generic, like
+// ResetClientState. Consider resetting all prefs here.
+void UkmService::ResetClientId() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ client_id_ = GenerateClientId(pref_service_);
+ session_id_ = LoadSessionId(pref_service_);
+ report_count_ = 0;
+}
+
+void UkmService::RegisterMetricsProvider(
+ std::unique_ptr<metrics::MetricsProvider> provider) {
+ metrics_providers_.RegisterMetricsProvider(std::move(provider));
+}
+
+// static
+void UkmService::RegisterPrefs(PrefRegistrySimple* registry) {
+ registry->RegisterInt64Pref(prefs::kUkmClientId, 0);
+ registry->RegisterIntegerPref(prefs::kUkmSessionId, 0);
+ UkmReportingService::RegisterPrefs(registry);
+}
+
+void UkmService::StartInitTask() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::StartInitTask";
+ metrics_providers_.AsyncInit(base::Bind(&UkmService::FinishedInitTask,
+ self_ptr_factory_.GetWeakPtr()));
+}
+
+void UkmService::FinishedInitTask() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::FinishedInitTask";
+ initialize_complete_ = true;
+ scheduler_->InitTaskComplete();
+}
+
+void UkmService::RotateLog() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::RotateLog";
+ if (!reporting_service_.ukm_log_store()->has_unsent_logs())
+ BuildAndStoreLog();
+ reporting_service_.Start();
+ scheduler_->RotationFinished();
+}
+
+void UkmService::BuildAndStoreLog() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << "UkmService::BuildAndStoreLog";
+
+ // Suppress generating a log if we have no new data to include.
+ // TODO(zhenw): add a histogram here to debug if this case is hitting a lot.
+ if (sources().empty() && entries().empty())
+ return;
+
+ Report report;
+ report.set_client_id(client_id_);
+ report.set_session_id(session_id_);
+ report.set_report_id(++report_count_);
+
+ StoreRecordingsInReport(&report);
+
+ metrics::MetricsLog::RecordCoreSystemProfile(client_,
+ report.mutable_system_profile());
+
+ metrics_providers_.ProvideSystemProfileMetrics(
+ report.mutable_system_profile());
+
+ std::string serialized_log;
+ report.SerializeToString(&serialized_log);
+ reporting_service_.ukm_log_store()->StoreLog(serialized_log);
+}
+
+bool UkmService::ShouldRestrictToWhitelistedEntries() const {
+ return restrict_to_whitelist_entries_;
+}
+
+} // namespace ukm
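BuildAndStoreLog() above serializes the Report and hands it to the log store, which compresses it; the unit tests in the next file reverse that with GzipUncompress() before parsing. The condensed round-trip sketch below uses only calls that appear in this import, with |log_store| standing for a PersistedLogs instance (the tests additionally call LoadPersistedUnsentLogs() first because they read back from prefs).

// Store side (from BuildAndStoreLog()).
std::string serialized_log;
report.SerializeToString(&serialized_log);
log_store->StoreLog(serialized_log);

// Read-back side (as done by GetPersistedReport() in the tests).
log_store->StageNextLog();
std::string uncompressed;
EXPECT_TRUE(compression::GzipUncompress(log_store->staged_log(),
                                        &uncompressed));
Report parsed;
EXPECT_TRUE(parsed.ParseFromString(uncompressed));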
diff --git a/components/ukm/ukm_service.h b/components/ukm/ukm_service.h
new file mode 100644
index 0000000..e5c6136
--- /dev/null
+++ b/components/ukm/ukm_service.h
@@ -0,0 +1,154 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_UKM_UKM_SERVICE_H_
+#define COMPONENTS_UKM_UKM_SERVICE_H_
+
+#include <stddef.h>
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/sequence_checker.h"
+#include "build/build_config.h"
+#include "components/metrics/delegating_provider.h"
+#include "components/metrics/metrics_provider.h"
+#include "components/metrics/metrics_rotation_scheduler.h"
+#include "components/ukm/ukm_recorder_impl.h"
+#include "components/ukm/ukm_reporting_service.h"
+
+class PrefRegistrySimple;
+class PrefService;
+
+namespace metrics {
+class MetricsServiceClient;
+class UkmBrowserTestBase;
+class UkmEGTestHelper;
+}
+
+namespace ukm {
+
+namespace debug {
+class UkmDebugDataExtractor;
+}
+
+// The URL-Keyed Metrics (UKM) service is responsible for gathering and
+// uploading reports that contain fine grained performance metrics including
+// URLs for top-level navigations.
+class UkmService : public UkmRecorderImpl {
+ public:
+ // Constructs a UkmService.
+ // Calling code is responsible for ensuring that the lifetime of
+ // |pref_service| is longer than the lifetime of UkmService.
+ UkmService(PrefService* pref_service,
+ metrics::MetricsServiceClient* client,
+ bool restrict_to_whitelist_entries);
+ ~UkmService() override;
+
+ // Initializes the UKM service.
+ void Initialize();
+
+ // Enables/disables transmission of accumulated logs. Logs that have already
+ // been created will remain persisted to disk.
+ void EnableReporting();
+ void DisableReporting();
+
+#if defined(OS_ANDROID) || defined(OS_IOS)
+ void OnAppEnterBackground();
+ void OnAppEnterForeground();
+#endif
+
+ // Records any collected data into logs, and writes to disk.
+ void Flush();
+
+ // Deletes any unsent local data.
+ void Purge();
+
+ // Resets the client id stored in prefs.
+ void ResetClientId();
+
+ // Registers the specified |provider| to provide additional metrics into the
+ // UKM log. Should be called during MetricsService initialization only.
+ void RegisterMetricsProvider(
+ std::unique_ptr<metrics::MetricsProvider> provider);
+
+ // Registers the names of all of the preferences used by UkmService in
+ // the provided PrefRegistry.
+ static void RegisterPrefs(PrefRegistrySimple* registry);
+
+ int32_t report_count() const { return report_count_; }
+
+ private:
+ friend ::metrics::UkmBrowserTestBase;
+ friend ::metrics::UkmEGTestHelper;
+ friend ::ukm::debug::UkmDebugDataExtractor;
+ friend ::ukm::UkmUtilsForTest;
+
+ // Starts metrics client initialization.
+ void StartInitTask();
+
+ // Called when initialization tasks are complete, to notify the scheduler
+ // that it can begin calling RotateLog.
+ void FinishedInitTask();
+
+ // Periodically called by scheduler_ to advance processing of logs.
+ void RotateLog();
+
+ // Constructs a new Report from available data and stores it in the
+ // reporting service's log store.
+ void BuildAndStoreLog();
+
+ // Starts an upload of the next log from persisted_logs_.
+ void StartScheduledUpload();
+
+ // Called by log_uploader_ when an upload is completed.
+ void OnLogUploadComplete(int response_code);
+
+ // ukm::UkmRecorderImpl:
+ bool ShouldRestrictToWhitelistedEntries() const override;
+
+ // A weak pointer to the PrefService used to read and write preferences.
+ PrefService* pref_service_;
+
+ // If true, only whitelisted Entries should be recorded.
+ bool restrict_to_whitelist_entries_;
+
+ // The UKM client id stored in prefs.
+ uint64_t client_id_;
+
+ // The UKM session id stored in prefs.
+ int32_t session_id_;
+
+ // The number of reports generated this session.
+ int32_t report_count_;
+
+ // Used to interact with the embedder. Weak pointer; must outlive |this|
+ // instance.
+ metrics::MetricsServiceClient* const client_;
+
+ // Registered metrics providers.
+ metrics::DelegatingProvider metrics_providers_;
+
+ // Log reporting service.
+ ukm::UkmReportingService reporting_service_;
+
+ // The scheduler for determining when uploads should happen.
+ std::unique_ptr<metrics::MetricsRotationScheduler> scheduler_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+
+ bool initialize_started_;
+ bool initialize_complete_;
+
+ // Weak pointers factory used to post task on different threads. All weak
+ // pointers managed by this factory have the same lifetime as UkmService.
+ base::WeakPtrFactory<UkmService> self_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(UkmService);
+};
+
+} // namespace ukm
+
+#endif // COMPONENTS_UKM_UKM_SERVICE_H_
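The typical bring-up order for the service declared above is the one the unit tests in the next file follow. Shown here condensed as a sketch, with |prefs| and |client| standing for the embedder's TestingPrefServiceSimple and MetricsServiceClient; the sequence itself is taken from those tests, not invented.

// Condensed bring-up sequence, mirroring the unit tests in this import.
UkmService::RegisterPrefs(prefs.registry());    // once, at startup
UkmService service(&prefs, &client,
                   true /* restrict_to_whitelisted_entries */);
service.Initialize();                           // kicks off async init tasks
service.EnableRecording(/*extensions=*/false);  // allow data collection
service.EnableReporting();                      // start the rotation scheduler
// ... record sources/entries via a UkmRecorder ...
service.Flush();                                // build a log and persist it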
diff --git a/components/ukm/ukm_service_unittest.cc b/components/ukm/ukm_service_unittest.cc
new file mode 100644
index 0000000..e73c0c2
--- /dev/null
+++ b/components/ukm/ukm_service_unittest.cc
@@ -0,0 +1,1144 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/ukm/ukm_service.h"
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/hash.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "components/metrics/test_metrics_provider.h"
+#include "components/metrics/test_metrics_service_client.h"
+#include "components/prefs/testing_pref_service.h"
+#include "components/ukm/persisted_logs_metrics_impl.h"
+#include "components/ukm/ukm_pref_names.h"
+#include "components/variations/variations_associated_data.h"
+#include "services/metrics/public/cpp/ukm_builders.h"
+#include "services/metrics/public/cpp/ukm_entry_builder.h"
+#include "services/metrics/public/cpp/ukm_source.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/metrics_proto/ukm/report.pb.h"
+#include "third_party/metrics_proto/ukm/source.pb.h"
+#include "third_party/zlib/google/compression_utils.h"
+
+namespace ukm {
+
+// Some arbitrary events used in tests.
+using TestEvent1 = ukm::builders::PageLoad;
+const char* kTestEvent1Metric1 =
+ TestEvent1::kPaintTiming_NavigationToFirstContentfulPaintName;
+const char* kTestEvent1Metric2 = TestEvent1::kNet_CacheBytesName;
+using TestEvent2 = ukm::builders::Memory_Experimental;
+const char* kTestEvent2Metric1 = TestEvent2::kArrayBufferName;
+const char* kTestEvent2Metric2 = TestEvent2::kBlinkGCName;
+using TestEvent3 = ukm::builders::Previews;
+
+std::string Entry1And2Whitelist() {
+ return std::string(TestEvent1::kEntryName) + ',' + TestEvent2::kEntryName;
+}
+
+// A small shim exposing UkmRecorder methods to tests.
+class TestRecordingHelper {
+ public:
+ explicit TestRecordingHelper(UkmRecorder* recorder) : recorder_(recorder) {
+ recorder_->DisableSamplingForTesting();
+ }
+
+ void UpdateSourceURL(SourceId source_id, const GURL& url) {
+ recorder_->UpdateSourceURL(source_id, url);
+ }
+
+ void RecordNavigation(SourceId source_id,
+ const UkmSource::NavigationData& navigation_data) {
+ recorder_->RecordNavigation(source_id, navigation_data);
+ }
+
+ private:
+ UkmRecorder* recorder_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestRecordingHelper);
+};
+
+namespace {
+
+bool TestIsWebstoreExtension(base::StringPiece id) {
+ return (id == "bhcnanendmgjjeghamaccjnochlnhcgj");
+}
+
+// TODO(rkaplow): consider making this a generic testing class in
+// components/variations.
+class ScopedUkmFeatureParams {
+ public:
+ ScopedUkmFeatureParams(
+ base::FeatureList::OverrideState feature_state,
+ const std::map<std::string, std::string>& variation_params) {
+ static const char kTestFieldTrialName[] = "TestTrial";
+ static const char kTestExperimentGroupName[] = "TestGroup";
+
+ variations::testing::ClearAllVariationParams();
+
+ EXPECT_TRUE(variations::AssociateVariationParams(
+ kTestFieldTrialName, kTestExperimentGroupName, variation_params));
+
+ base::FieldTrial* field_trial = base::FieldTrialList::CreateFieldTrial(
+ kTestFieldTrialName, kTestExperimentGroupName);
+
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+ feature_list->RegisterFieldTrialOverride(kUkmFeature.name, feature_state,
+ field_trial);
+
+ // Since we are adding a scoped feature list after browser start, copy over
+ // the existing feature list to prevent inconsistency.
+ base::FeatureList* existing_feature_list = base::FeatureList::GetInstance();
+ if (existing_feature_list) {
+ std::string enabled_features;
+ std::string disabled_features;
+ base::FeatureList::GetInstance()->GetFeatureOverrides(&enabled_features,
+ &disabled_features);
+ feature_list->InitializeFromCommandLine(enabled_features,
+ disabled_features);
+ }
+
+ scoped_feature_list_.InitWithFeatureList(std::move(feature_list));
+ }
+
+ ~ScopedUkmFeatureParams() { variations::testing::ClearAllVariationParams(); }
+
+ private:
+ base::test::ScopedFeatureList scoped_feature_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedUkmFeatureParams);
+};
+
+class UkmServiceTest : public testing::Test {
+ public:
+ UkmServiceTest()
+ : task_runner_(new base::TestSimpleTaskRunner),
+ task_runner_handle_(task_runner_) {
+ UkmService::RegisterPrefs(prefs_.registry());
+ ClearPrefs();
+ }
+
+ void ClearPrefs() {
+ prefs_.ClearPref(prefs::kUkmClientId);
+ prefs_.ClearPref(prefs::kUkmSessionId);
+ prefs_.ClearPref(prefs::kUkmPersistedLogs);
+ }
+
+ int GetPersistedLogCount() {
+ const base::ListValue* list_value =
+ prefs_.GetList(prefs::kUkmPersistedLogs);
+ return list_value->GetSize();
+ }
+
+ Report GetPersistedReport() {
+ EXPECT_GE(GetPersistedLogCount(), 1);
+ metrics::PersistedLogs result_persisted_logs(
+ std::make_unique<ukm::PersistedLogsMetricsImpl>(), &prefs_,
+ prefs::kUkmPersistedLogs,
+ 3, // log count limit
+ 1000, // byte limit
+ 0);
+
+ result_persisted_logs.LoadPersistedUnsentLogs();
+ result_persisted_logs.StageNextLog();
+
+ std::string uncompressed_log_data;
+ EXPECT_TRUE(compression::GzipUncompress(result_persisted_logs.staged_log(),
+ &uncompressed_log_data));
+
+ Report report;
+ EXPECT_TRUE(report.ParseFromString(uncompressed_log_data));
+ return report;
+ }
+
+ static SourceId GetWhitelistedSourceId(int64_t id) {
+ return ConvertToSourceId(id, SourceIdType::NAVIGATION_ID);
+ }
+
+ static SourceId GetNonWhitelistedSourceId(int64_t id) {
+ return ConvertToSourceId(id, SourceIdType::UKM);
+ }
+
+ protected:
+ TestingPrefServiceSimple prefs_;
+ metrics::TestMetricsServiceClient client_;
+
+ scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
+ base::ThreadTaskRunnerHandle task_runner_handle_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(UkmServiceTest);
+};
+
+} // namespace
+
+TEST_F(UkmServiceTest, EnableDisableSchedule) {
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ EXPECT_FALSE(task_runner_->HasPendingTask());
+ service.Initialize();
+ EXPECT_FALSE(task_runner_->HasPendingTask());
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+ EXPECT_TRUE(task_runner_->HasPendingTask());
+ service.DisableReporting();
+ task_runner_->RunPendingTasks();
+ EXPECT_FALSE(task_runner_->HasPendingTask());
+}
+
+TEST_F(UkmServiceTest, PersistAndPurge) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"WhitelistEntries", Entry1And2Whitelist()}});
+
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ SourceId id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+ // Should init, generate a log, and start an upload for source.
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client_.uploader()->is_uploading());
+ // Flushes the generated log to disk and generates a new entry.
+ TestEvent1(id).Record(&service);
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 2);
+ service.Purge();
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+}
+
+TEST_F(UkmServiceTest, Purge) {
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ // Record some data
+ auto id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar1"));
+ TestEvent1(id).Record(&service);
+
+ // Purge should delete data, so there shouldn't be anything left to upload.
+ service.Purge();
+ service.Flush();
+ EXPECT_EQ(0, GetPersistedLogCount());
+}
+
+TEST_F(UkmServiceTest, SourceSerialization) {
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ UkmSource::NavigationData navigation_data;
+ navigation_data.urls = {GURL("https://google.com/initial"),
+ GURL("https://google.com/final")};
+
+ ukm::SourceId id = GetWhitelistedSourceId(0);
+ recorder.RecordNavigation(id, navigation_data);
+
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 1);
+
+ Report proto_report = GetPersistedReport();
+ EXPECT_EQ(1, proto_report.sources_size());
+ EXPECT_TRUE(proto_report.has_session_id());
+ const Source& proto_source = proto_report.sources(0);
+
+ EXPECT_EQ(id, proto_source.id());
+ EXPECT_EQ(GURL("https://google.com/final").spec(), proto_source.url());
+ EXPECT_FALSE(proto_source.has_initial_url());
+}
+
+TEST_F(UkmServiceTest, AddEntryWithEmptyMetrics) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"WhitelistEntries", Entry1And2Whitelist()}});
+
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ ASSERT_EQ(0, GetPersistedLogCount());
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ ukm::SourceId id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+
+ TestEvent1(id).Record(&service);
+ service.Flush();
+ ASSERT_EQ(1, GetPersistedLogCount());
+ Report proto_report = GetPersistedReport();
+ EXPECT_EQ(1, proto_report.entries_size());
+}
+
+TEST_F(UkmServiceTest, MetricsProviderTest) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"WhitelistEntries", Entry1And2Whitelist()}});
+
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+
+ metrics::TestMetricsProvider* provider = new metrics::TestMetricsProvider();
+ service.RegisterMetricsProvider(
+ std::unique_ptr<metrics::MetricsProvider>(provider));
+
+ service.Initialize();
+
+ // Providers have not supplied system profile information yet.
+ EXPECT_FALSE(provider->provide_system_profile_metrics_called());
+
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ ukm::SourceId id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+ TestEvent1(id).Record(&service);
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 1);
+
+ Report proto_report = GetPersistedReport();
+ EXPECT_EQ(1, proto_report.sources_size());
+ EXPECT_EQ(1, proto_report.entries_size());
+
+ // Providers have now supplied system profile information.
+ EXPECT_TRUE(provider->provide_system_profile_metrics_called());
+}
+
+TEST_F(UkmServiceTest, LogsRotation) {
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ EXPECT_EQ(0, service.report_count());
+
+ // Log rotation should generate a log.
+ const ukm::SourceId id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+ task_runner_->RunPendingTasks();
+ EXPECT_EQ(1, service.report_count());
+ EXPECT_TRUE(client_.uploader()->is_uploading());
+
+ // Rotation shouldn't generate a log due to one being pending.
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+ task_runner_->RunPendingTasks();
+ EXPECT_EQ(1, service.report_count());
+ EXPECT_TRUE(client_.uploader()->is_uploading());
+
+ // Completing the upload should clear pending log, then log rotation should
+ // generate another log.
+ client_.uploader()->CompleteUpload(200);
+ task_runner_->RunPendingTasks();
+ EXPECT_EQ(2, service.report_count());
+
+ // Check that rotations keep working.
+ for (int i = 3; i < 6; i++) {
+ task_runner_->RunPendingTasks();
+ client_.uploader()->CompleteUpload(200);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+ task_runner_->RunPendingTasks();
+ EXPECT_EQ(i, service.report_count());
+ }
+}
+
+TEST_F(UkmServiceTest, LogsUploadedOnlyWhenHavingSourcesOrEntries) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ // Testing two whitelisted Entries.
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"WhitelistEntries", Entry1And2Whitelist()}});
+
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ EXPECT_TRUE(task_runner_->HasPendingTask());
+ // Neither rotation nor Flush should generate logs.
+ task_runner_->RunPendingTasks();
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+
+ ukm::SourceId id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+ // Includes a Source, so will persist.
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 1);
+
+ TestEvent1(id).Record(&service);
+ // Includes an Entry, so will persist.
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 2);
+
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+ TestEvent1(id).Record(&service);
+ // Includes a Source and an Entry, so will persist.
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 3);
+
+ // Current log has no Sources.
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 3);
+}
+
+TEST_F(UkmServiceTest, GetNewSourceID) {
+ ukm::SourceId id1 = UkmRecorder::GetNewSourceID();
+ ukm::SourceId id2 = UkmRecorder::GetNewSourceID();
+ ukm::SourceId id3 = UkmRecorder::GetNewSourceID();
+ EXPECT_NE(id1, id2);
+ EXPECT_NE(id1, id3);
+ EXPECT_NE(id2, id3);
+}
+
+TEST_F(UkmServiceTest, RecordInitialUrl) {
+ for (bool should_record_initial_url : {true, false}) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(
+ base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"RecordInitialUrl", should_record_initial_url ? "true" : "false"}});
+
+ ClearPrefs();
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ ukm::SourceId id = GetWhitelistedSourceId(0);
+ UkmSource::NavigationData navigation_data;
+ navigation_data.urls = {GURL("https://google.com/initial"),
+ GURL("https://google.com/final")};
+ recorder.RecordNavigation(id, navigation_data);
+
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 1);
+
+ Report proto_report = GetPersistedReport();
+ EXPECT_EQ(1, proto_report.sources_size());
+ const Source& proto_source = proto_report.sources(0);
+
+ EXPECT_EQ(id, proto_source.id());
+ EXPECT_EQ(GURL("https://google.com/final").spec(), proto_source.url());
+ EXPECT_EQ(should_record_initial_url, proto_source.has_initial_url());
+ if (should_record_initial_url) {
+ EXPECT_EQ(GURL("https://google.com/initial").spec(),
+ proto_source.initial_url());
+ }
+ }
+}
+
+TEST_F(UkmServiceTest, RestrictToWhitelistedSourceIds) {
+ const GURL kURL = GURL("https://example.com/");
+ for (bool restrict_to_whitelisted_source_ids : {true, false}) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(
+ base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"RestrictToWhitelistedSourceIds",
+ restrict_to_whitelisted_source_ids ? "true" : "false"},
+ {"WhitelistEntries", Entry1And2Whitelist()}});
+
+ ClearPrefs();
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ ukm::SourceId id1 = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id1, kURL);
+ TestEvent1(id1).Record(&service);
+
+ // Create a non-navigation-based sourceid, which should not be whitelisted.
+ ukm::SourceId id2 = GetNonWhitelistedSourceId(1);
+ recorder.UpdateSourceURL(id2, kURL);
+ TestEvent1(id2).Record(&service);
+
+ service.Flush();
+ ASSERT_EQ(GetPersistedLogCount(), 1);
+ Report proto_report = GetPersistedReport();
+ ASSERT_GE(proto_report.sources_size(), 1);
+
+ // The whitelisted source should always be recorded.
+ const Source& proto_source1 = proto_report.sources(0);
+ EXPECT_EQ(id1, proto_source1.id());
+ EXPECT_EQ(kURL.spec(), proto_source1.url());
+
+ // The non-whitelisted source should only be recorded if we aren't
+ // restricted to whitelisted source ids.
+ if (restrict_to_whitelisted_source_ids) {
+ ASSERT_EQ(1, proto_report.sources_size());
+ } else {
+ ASSERT_EQ(2, proto_report.sources_size());
+ const Source& proto_source2 = proto_report.sources(1);
+ EXPECT_EQ(id2, proto_source2.id());
+ EXPECT_EQ(kURL.spec(), proto_source2.url());
+ }
+ }
+}
+
+TEST_F(UkmServiceTest, RecordSessionId) {
+ ClearPrefs();
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(0, GetPersistedLogCount());
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ auto id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar"));
+
+ service.Flush();
+ EXPECT_EQ(1, GetPersistedLogCount());
+
+ auto proto_report = GetPersistedReport();
+ EXPECT_TRUE(proto_report.has_session_id());
+ EXPECT_EQ(1, proto_report.report_id());
+}
+
+TEST_F(UkmServiceTest, SourceSize) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ // Set a threshold of number of Sources via Feature Params.
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"MaxSources", "2"}});
+
+ ClearPrefs();
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(0, GetPersistedLogCount());
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ auto id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar1"));
+ id = GetWhitelistedSourceId(1);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar2"));
+ id = GetWhitelistedSourceId(2);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar3"));
+
+ service.Flush();
+ EXPECT_EQ(1, GetPersistedLogCount());
+
+ auto proto_report = GetPersistedReport();
+ // Note, 2 instead of 3 sources, since we overrode the max number of sources
+ // via Feature params.
+ EXPECT_EQ(2, proto_report.sources_size());
+}
+
+TEST_F(UkmServiceTest, PurgeMidUpload) {
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ auto id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar1"));
+ // Should init, generate a log, and start an upload.
+ task_runner_->RunPendingTasks();
+ EXPECT_TRUE(client_.uploader()->is_uploading());
+ // Purge should delete all logs, including the one being sent.
+ service.Purge();
+ // Upload succeeds after the logs were deleted.
+ client_.uploader()->CompleteUpload(200);
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ EXPECT_FALSE(client_.uploader()->is_uploading());
+}
+
+TEST_F(UkmServiceTest, WhitelistEntryTest) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ // Testing two whitelisted Entries.
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"WhitelistEntries", Entry1And2Whitelist()}});
+
+ ClearPrefs();
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(0, GetPersistedLogCount());
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ auto id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://google.com/foobar1"));
+
+ TestEvent1(id).Record(&service);
+ TestEvent2(id).Record(&service);
+ // Note that this third entry is not in the whitelist.
+ TestEvent3(id).Record(&service);
+
+ service.Flush();
+ EXPECT_EQ(1, GetPersistedLogCount());
+ Report proto_report = GetPersistedReport();
+
+ // Verify we've added one source and 2 entries.
+ EXPECT_EQ(1, proto_report.sources_size());
+ ASSERT_EQ(2, proto_report.entries_size());
+
+ const Entry& proto_entry_a = proto_report.entries(0);
+ EXPECT_EQ(id, proto_entry_a.source_id());
+ EXPECT_EQ(base::HashMetricName(TestEvent1::kEntryName),
+ proto_entry_a.event_hash());
+
+ const Entry& proto_entry_b = proto_report.entries(1);
+ EXPECT_EQ(id, proto_entry_b.source_id());
+ EXPECT_EQ(base::HashMetricName(TestEvent2::kEntryName),
+ proto_entry_b.event_hash());
+}
+
+TEST_F(UkmServiceTest, SourceURLLength) {
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(0, GetPersistedLogCount());
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ auto id = GetWhitelistedSourceId(0);
+
+ // This URL is too long to be recorded fully.
+ const std::string long_string =
+ "https://example.com/" + std::string(10000, 'a');
+ recorder.UpdateSourceURL(id, GURL(long_string));
+
+ service.Flush();
+ EXPECT_EQ(1, GetPersistedLogCount());
+
+ auto proto_report = GetPersistedReport();
+ ASSERT_EQ(1, proto_report.sources_size());
+ const Source& proto_source = proto_report.sources(0);
+ EXPECT_EQ("URLTooLong", proto_source.url());
+}
+
+TEST_F(UkmServiceTest, UnreferencedNonWhitelistedSources) {
+ const GURL kURL("https://google.com/foobar");
+ for (bool restrict_to_whitelisted_source_ids : {true, false}) {
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ // Set a threshold of number of Sources via Feature Params.
+ ScopedUkmFeatureParams params(
+ base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"MaxKeptSources", "3"},
+ {"WhitelistEntries", Entry1And2Whitelist()},
+ {"RestrictToWhitelistedSourceIds",
+ restrict_to_whitelisted_source_ids ? "true" : "false"}});
+
+ ClearPrefs();
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(0, GetPersistedLogCount());
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ // Record with whitelisted ID to whitelist the URL.
+ // Use a larger ID to make it last in the proto.
+ ukm::SourceId whitelisted_id = GetWhitelistedSourceId(100);
+ recorder.UpdateSourceURL(whitelisted_id, kURL);
+
+ std::vector<SourceId> ids;
+ base::TimeTicks last_time = base::TimeTicks::Now();
+ for (int i = 0; i < 6; ++i) {
+ // Wait until base::TimeTicks::Now() no longer equals |last_time|. This
+ // ensures each source has a unique timestamp to avoid flakes. Should take
+ // between 1-15ms per documented resolution of base::TimeTicks.
+ while (base::TimeTicks::Now() == last_time) {
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+ }
+
+ ids.push_back(GetNonWhitelistedSourceId(i));
+ recorder.UpdateSourceURL(ids.back(), kURL);
+ last_time = base::TimeTicks::Now();
+ }
+
+ // Add whitelisted entries for 0, 2 and non-whitelisted entries for 2, 3.
+ TestEvent1(ids[0]).Record(&service);
+ TestEvent2(ids[2]).Record(&service);
+ TestEvent3(ids[2]).Record(&service);
+ TestEvent3(ids[3]).Record(&service);
+
+ service.Flush();
+ EXPECT_EQ(1, GetPersistedLogCount());
+ auto proto_report = GetPersistedReport();
+
+ if (restrict_to_whitelisted_source_ids) {
+ EXPECT_EQ(1, proto_report.source_counts().observed());
+ EXPECT_EQ(1, proto_report.source_counts().navigation_sources());
+ EXPECT_EQ(0, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(0, proto_report.source_counts().deferred_sources());
+ EXPECT_EQ(0, proto_report.source_counts().carryover_sources());
+
+ ASSERT_EQ(1, proto_report.sources_size());
+ } else {
+ EXPECT_EQ(7, proto_report.source_counts().observed());
+ EXPECT_EQ(1, proto_report.source_counts().navigation_sources());
+ EXPECT_EQ(0, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(4, proto_report.source_counts().deferred_sources());
+ EXPECT_EQ(0, proto_report.source_counts().carryover_sources());
+
+ ASSERT_EQ(3, proto_report.sources_size());
+ EXPECT_EQ(ids[0], proto_report.sources(0).id());
+ EXPECT_EQ(kURL.spec(), proto_report.sources(0).url());
+ EXPECT_EQ(ids[2], proto_report.sources(1).id());
+ EXPECT_EQ(kURL.spec(), proto_report.sources(1).url());
+ }
+
+ // Since MaxKeptSources is 3, only Sources 5, 4, 3 should be retained.
+ // Log entries under 0, 1, 3 and 4. Log them in reverse order - which
+ // shouldn't affect source ordering in the output.
+ // - Source 0 should not be re-transmitted since it was sent before.
+ // - Source 1 should not be transmitted due to MaxKeptSources param.
+ // - Sources 3 and 4 should be transmitted since they were not sent before.
+ TestEvent1(ids[4]).Record(&service);
+ TestEvent1(ids[3]).Record(&service);
+ TestEvent1(ids[1]).Record(&service);
+ TestEvent1(ids[0]).Record(&service);
+
+ service.Flush();
+ EXPECT_EQ(2, GetPersistedLogCount());
+ proto_report = GetPersistedReport();
+
+ if (restrict_to_whitelisted_source_ids) {
+ EXPECT_EQ(0, proto_report.source_counts().observed());
+ EXPECT_EQ(0, proto_report.source_counts().navigation_sources());
+ EXPECT_EQ(0, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(0, proto_report.source_counts().deferred_sources());
+ EXPECT_EQ(0, proto_report.source_counts().carryover_sources());
+
+ ASSERT_EQ(0, proto_report.sources_size());
+ } else {
+ EXPECT_EQ(0, proto_report.source_counts().observed());
+ EXPECT_EQ(0, proto_report.source_counts().navigation_sources());
+ EXPECT_EQ(0, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(1, proto_report.source_counts().deferred_sources());
+ EXPECT_EQ(3, proto_report.source_counts().carryover_sources());
+
+ ASSERT_EQ(2, proto_report.sources_size());
+ EXPECT_EQ(ids[3], proto_report.sources(0).id());
+ EXPECT_EQ(kURL.spec(), proto_report.sources(0).url());
+ EXPECT_EQ(ids[4], proto_report.sources(1).id());
+ EXPECT_EQ(kURL.spec(), proto_report.sources(1).url());
+ }
+ }
+}
+
+TEST_F(UkmServiceTest, NonWhitelistedUrls) {
+ const GURL kURL("https://google.com/foobar");
+ struct {
+ GURL url;
+ bool expected_kept;
+ } test_cases[] = {
+ {GURL("https://google.com/foobar"), true},
+ // For origin-only URLs, only the origin needs to be matched.
+ {GURL("https://google.com"), true},
+ {GURL("https://google.com/foobar2"), false},
+ {GURL("https://other.com"), false},
+ };
+
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"WhitelistEntries", Entry1And2Whitelist()}});
+
+ for (const auto& test : test_cases) {
+ ClearPrefs();
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+
+ ASSERT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ // Record with whitelisted ID to whitelist the URL.
+ ukm::SourceId whitelist_id = GetWhitelistedSourceId(1);
+ recorder.UpdateSourceURL(whitelist_id, kURL);
+
+ // Record a non-whitelisted ID with an entry.
+ ukm::SourceId nonwhitelist_id = GetNonWhitelistedSourceId(100);
+ recorder.UpdateSourceURL(nonwhitelist_id, test.url);
+ TestEvent1(nonwhitelist_id).Record(&service);
+
+ service.Flush();
+ ASSERT_EQ(1, GetPersistedLogCount());
+ auto proto_report = GetPersistedReport();
+
+ EXPECT_EQ(2, proto_report.source_counts().observed());
+ EXPECT_EQ(1, proto_report.source_counts().navigation_sources());
+ if (test.expected_kept) {
+ EXPECT_EQ(0, proto_report.source_counts().unmatched_sources());
+ ASSERT_EQ(2, proto_report.sources_size());
+ EXPECT_EQ(whitelist_id, proto_report.sources(0).id());
+ EXPECT_EQ(kURL, proto_report.sources(0).url());
+ EXPECT_EQ(nonwhitelist_id, proto_report.sources(1).id());
+ EXPECT_EQ(test.url, proto_report.sources(1).url());
+ } else {
+ EXPECT_EQ(1, proto_report.source_counts().unmatched_sources());
+ ASSERT_EQ(1, proto_report.sources_size());
+ EXPECT_EQ(whitelist_id, proto_report.sources(0).id());
+ EXPECT_EQ(kURL, proto_report.sources(0).url());
+ }
+ }
+}
+
+TEST_F(UkmServiceTest, NonWhitelistedCarryoverUrls) {
+ const GURL kURL("https://google.com/foobar");
+
+ struct {
+ // Source1 is recorded during the first rotation with no entry.
+ // An entry for it is recorded in the second rotation.
+ GURL source1_url;
+ // Should Source1 be seen in second rotation's log.
+ bool expect_source1;
+ // Source2 is recorded during the second rotation with an entry.
+ GURL source2_url;
+ // Should Source2 be seen in second rotation's log.
+ bool expect_source2;
+ } test_cases[] = {
+ // Recording the URL captures it in the whitelist, which will also allow
+ // exact matches of the same URL.
+ {GURL("https://google.com/foobar"), true,
+ GURL("https://google.com/foobar"), true},
+ // Capturing a full URL shouldn't allow origin matches.
+ {GURL("https://google.com/foobar"), true, GURL("https://google.com"),
+ false},
+ // Uncaptured URLs won't get matched.
+ {GURL("https://google.com/foobar"), true, GURL("https://other.com"),
+ false},
+ // Origin should be capturable, and will remember the same origin.
+ {GURL("https://google.com"), true, GURL("https://google.com"), true},
+ // If the origin is captured, only the origin is remembered.
+ {GURL("https://google.com"), true, GURL("https://google.com/foobar"),
+ false},
+ // Uncaptured URLs won't get matched.
+ {GURL("https://google.com"), true, GURL("https://other.com"), false},
+ // If the URL isn't captured in the first round, it won't capture later.
+ {GURL("https://other.com"), false, GURL("https://google.com/foobar"),
+ false},
+ {GURL("https://other.com"), false, GURL("https://google.com"), false},
+ // Entries shouldn't whitelist themselves.
+ {GURL("https://other.com"), false, GURL("https://other.com"), false},
+ };
+
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE,
+ {{"WhitelistEntries", Entry1And2Whitelist()}});
+
+ for (const auto& test : test_cases) {
+ ClearPrefs();
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ // Record with whitelisted ID to whitelist the URL.
+ ukm::SourceId whitelist_id = GetWhitelistedSourceId(1);
+ recorder.UpdateSourceURL(whitelist_id, kURL);
+
+ // Record test Source1 without an event.
+ ukm::SourceId nonwhitelist_id1 = GetNonWhitelistedSourceId(100);
+ recorder.UpdateSourceURL(nonwhitelist_id1, test.source1_url);
+
+ service.Flush();
+ ASSERT_EQ(1, GetPersistedLogCount());
+ auto proto_report = GetPersistedReport();
+
+ EXPECT_EQ(2, proto_report.source_counts().observed());
+ EXPECT_EQ(1, proto_report.source_counts().navigation_sources());
+ EXPECT_EQ(0, proto_report.source_counts().carryover_sources());
+ if (test.expect_source1) {
+ EXPECT_EQ(0, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(1, proto_report.source_counts().deferred_sources());
+ } else {
+ EXPECT_EQ(1, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(0, proto_report.source_counts().deferred_sources());
+ }
+ ASSERT_EQ(1, proto_report.sources_size());
+ EXPECT_EQ(whitelist_id, proto_report.sources(0).id());
+ EXPECT_EQ(kURL, proto_report.sources(0).url());
+
+ // Record the Source2 and events for Source1 and Source2.
+ ukm::SourceId nonwhitelist_id2 = GetNonWhitelistedSourceId(101);
+ recorder.UpdateSourceURL(nonwhitelist_id2, test.source2_url);
+ TestEvent1(nonwhitelist_id1).Record(&service);
+ TestEvent1(nonwhitelist_id2).Record(&service);
+
+ service.Flush();
+ ASSERT_EQ(2, GetPersistedLogCount());
+ proto_report = GetPersistedReport();
+
+ EXPECT_EQ(1, proto_report.source_counts().observed());
+ EXPECT_EQ(0, proto_report.source_counts().navigation_sources());
+ EXPECT_EQ(0, proto_report.source_counts().deferred_sources());
+ if (!test.expect_source1) {
+ EXPECT_FALSE(test.expect_source2);
+ EXPECT_EQ(1, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(0, proto_report.source_counts().carryover_sources());
+ ASSERT_EQ(0, proto_report.sources_size());
+ } else if (!test.expect_source2) {
+ EXPECT_EQ(1, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(1, proto_report.source_counts().carryover_sources());
+ ASSERT_EQ(1, proto_report.sources_size());
+ EXPECT_EQ(nonwhitelist_id1, proto_report.sources(0).id());
+ EXPECT_EQ(test.source1_url, proto_report.sources(0).url());
+ } else {
+ EXPECT_EQ(0, proto_report.source_counts().unmatched_sources());
+ EXPECT_EQ(1, proto_report.source_counts().carryover_sources());
+ ASSERT_EQ(2, proto_report.sources_size());
+ EXPECT_EQ(nonwhitelist_id1, proto_report.sources(0).id());
+ EXPECT_EQ(test.source1_url, proto_report.sources(0).url());
+ EXPECT_EQ(nonwhitelist_id2, proto_report.sources(1).id());
+ EXPECT_EQ(test.source2_url, proto_report.sources(1).url());
+ }
+ }
+}
+
+TEST_F(UkmServiceTest, SupportedSchemes) {
+ struct {
+ const char* url;
+ bool expected_kept;
+ } test_cases[] = {
+ {"http://google.ca/", true},
+ {"https://google.ca/", true},
+ {"ftp://google.ca/", true},
+ {"about:blank", true},
+ {"chrome://version/", true},
+ {"app://play/abcdefghijklmnopqrstuvwxyzabcdef/", true},
+ // chrome-extension URLs are controlled by TestIsWebstoreExtension, above.
+ {"chrome-extension://bhcnanendmgjjeghamaccjnochlnhcgj/", true},
+ {"chrome-extension://abcdefghijklmnopqrstuvwxyzabcdef/", false},
+ {"file:///tmp/", false},
+ {"abc://google.ca/", false},
+ {"www.google.ca/", false},
+ };
+
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE, {});
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ service.SetIsWebstoreExtensionCallback(
+ base::BindRepeating(&TestIsWebstoreExtension));
+
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/true);
+ service.EnableReporting();
+
+ int64_t id_counter = 1;
+ int expected_kept_count = 0;
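+ // Record each URL with a whitelisted source ID plus an event; only URLs with
+ // supported schemes are expected to be kept in the report.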
+ for (const auto& test : test_cases) {
+ auto source_id = GetWhitelistedSourceId(id_counter++);
+ recorder.UpdateSourceURL(source_id, GURL(test.url));
+ TestEvent1(source_id).Record(&service);
+ if (test.expected_kept)
+ ++expected_kept_count;
+ }
+
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 1);
+ Report proto_report = GetPersistedReport();
+
+ EXPECT_EQ(expected_kept_count, proto_report.sources_size());
+ for (const auto& test : test_cases) {
+ bool found = false;
+ for (int i = 0; i < proto_report.sources_size(); ++i) {
+ if (proto_report.sources(i).url() == test.url) {
+ found = true;
+ break;
+ }
+ }
+ EXPECT_EQ(test.expected_kept, found) << test.url;
+ }
+}
+
+TEST_F(UkmServiceTest, SupportedSchemesNoExtensions) {
+ struct {
+ const char* url;
+ bool expected_kept;
+ } test_cases[] = {
+ {"http://google.ca/", true},
+ {"https://google.ca/", true},
+ {"ftp://google.ca/", true},
+ {"about:blank", true},
+ {"chrome://version/", true},
+ {"app://play/abcdefghijklmnopqrstuvwxyzabcdef/", true},
+ {"chrome-extension://bhcnanendmgjjeghamaccjnochlnhcgj/", false},
+ {"chrome-extension://abcdefghijklmnopqrstuvwxyzabcdef/", false},
+ {"file:///tmp/", false},
+ {"abc://google.ca/", false},
+ {"www.google.ca/", false},
+ };
+
+ base::FieldTrialList field_trial_list(nullptr /* entropy_provider */);
+ ScopedUkmFeatureParams params(base::FeatureList::OVERRIDE_ENABLE_FEATURE, {});
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+
+ EXPECT_EQ(GetPersistedLogCount(), 0);
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ int64_t id_counter = 1;
+ int expected_kept_count = 0;
+ for (const auto& test : test_cases) {
+ auto source_id = GetWhitelistedSourceId(id_counter++);
+ recorder.UpdateSourceURL(source_id, GURL(test.url));
+ TestEvent1(source_id).Record(&service);
+ if (test.expected_kept)
+ ++expected_kept_count;
+ }
+
+ service.Flush();
+ EXPECT_EQ(GetPersistedLogCount(), 1);
+ Report proto_report = GetPersistedReport();
+
+ EXPECT_EQ(expected_kept_count, proto_report.sources_size());
+ for (const auto& test : test_cases) {
+ bool found = false;
+ for (int i = 0; i < proto_report.sources_size(); ++i) {
+ if (proto_report.sources(i).url() == test.url) {
+ found = true;
+ break;
+ }
+ }
+ EXPECT_EQ(test.expected_kept, found) << test.url;
+ }
+}
+
+TEST_F(UkmServiceTest, SanitizeUrlAuthParams) {
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ EXPECT_EQ(0, GetPersistedLogCount());
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/false);
+ service.EnableReporting();
+
+ auto id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL("https://username:password@example.com/"));
+
+ service.Flush();
+ EXPECT_EQ(1, GetPersistedLogCount());
+
+ auto proto_report = GetPersistedReport();
+ ASSERT_EQ(1, proto_report.sources_size());
+ const Source& proto_source = proto_report.sources(0);
+ EXPECT_EQ("https://example.com/", proto_source.url());
+}
+
+TEST_F(UkmServiceTest, SanitizeChromeUrlParams) {
+ struct {
+ const char* url;
+ const char* expected_url;
+ } test_cases[] = {
+ {"chrome://version/?foo=bar", "chrome://version/"},
+ {"about:blank?foo=bar", "about:blank"},
+ {"chrome://histograms/Variations", "chrome://histograms/Variations"},
+ {"http://google.ca/?foo=bar", "http://google.ca/?foo=bar"},
+ {"https://google.ca/?foo=bar", "https://google.ca/?foo=bar"},
+ {"ftp://google.ca/?foo=bar", "ftp://google.ca/?foo=bar"},
+ {"chrome-extension://bhcnanendmgjjeghamaccjnochlnhcgj/foo.html?a=b",
+ "chrome-extension://bhcnanendmgjjeghamaccjnochlnhcgj/"},
+ };
+
+ for (const auto& test : test_cases) {
+ ClearPrefs();
+
+ UkmService service(&prefs_, &client_,
+ true /* restrict_to_whitelisted_entries */);
+ TestRecordingHelper recorder(&service);
+ service.SetIsWebstoreExtensionCallback(
+ base::BindRepeating(&TestIsWebstoreExtension));
+
+ EXPECT_EQ(0, GetPersistedLogCount());
+ service.Initialize();
+ task_runner_->RunUntilIdle();
+ service.EnableRecording(/*extensions=*/true);
+ service.EnableReporting();
+
+ auto id = GetWhitelistedSourceId(0);
+ recorder.UpdateSourceURL(id, GURL(test.url));
+
+ service.Flush();
+ EXPECT_EQ(1, GetPersistedLogCount());
+
+ auto proto_report = GetPersistedReport();
+ ASSERT_EQ(1, proto_report.sources_size());
+ const Source& proto_source = proto_report.sources(0);
+ EXPECT_EQ(test.expected_url, proto_source.url());
+ }
+}
+
+} // namespace ukm
diff --git a/components/variations/BUILD.gn b/components/variations/BUILD.gn
new file mode 100644
index 0000000..1a46296
--- /dev/null
+++ b/components/variations/BUILD.gn
@@ -0,0 +1,203 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
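+# //testing/test.gni provides the test() template, which is only needed for the
+# test targets below that are skipped under Cobalt customizations.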
+if (!use_cobalt_customizations) {
+ import("//testing/test.gni")
+}
+
+if (is_android) {
+ import("//build/config/android/rules.gni")
+}
+
+static_library("variations") {
+ sources = [
+ "active_field_trials.cc",
+ "active_field_trials.h",
+ "android/variations_associated_data_android.cc",
+ "android/variations_seed_bridge.cc",
+ "android/variations_seed_bridge.h",
+ "caching_permuted_entropy_provider.cc",
+ "caching_permuted_entropy_provider.h",
+ "child_process_field_trial_syncer.cc",
+ "child_process_field_trial_syncer.h",
+ "client_filterable_state.cc",
+ "client_filterable_state.h",
+ "entropy_provider.cc",
+ "entropy_provider.h",
+ "experiment_labels.cc",
+ "experiment_labels.h",
+ "hashing.cc",
+ "hashing.h",
+ "metrics.cc",
+ "metrics.h",
+ "platform_field_trials.h",
+ "pref_names.cc",
+ "pref_names.h",
+ "processed_study.cc",
+ "processed_study.h",
+ "proto/client_variations.proto",
+ "proto/permuted_entropy_cache.proto",
+ "proto/study.proto",
+ "proto/variations_seed.proto",
+ "seed_response.cc",
+ "seed_response.h",
+ "study_filtering.cc",
+ "study_filtering.h",
+ "synthetic_trial_registry.cc",
+ "synthetic_trial_registry.h",
+ "synthetic_trials.cc",
+ "synthetic_trials.h",
+ "synthetic_trials_active_group_id_provider.cc",
+ "synthetic_trials_active_group_id_provider.h",
+ "variations_associated_data.cc",
+ "variations_associated_data.h",
+ "variations_crash_keys.cc",
+ "variations_crash_keys.h",
+ "variations_experiment_util.cc",
+ "variations_experiment_util.h",
+ "variations_http_header_provider.cc",
+ "variations_http_header_provider.h",
+ "variations_id_collection.cc",
+ "variations_id_collection.h",
+ "variations_request_scheduler.cc",
+ "variations_request_scheduler.h",
+ "variations_seed_processor.cc",
+ "variations_seed_processor.h",
+ "variations_seed_simulator.cc",
+ "variations_seed_simulator.h",
+ "variations_seed_store.cc",
+ "variations_seed_store.h",
+ "variations_switches.cc",
+ "variations_switches.h",
+ "variations_url_constants.cc",
+ "variations_url_constants.h",
+ ]
+
+ if (is_starboard) {
+ sources -= [
+ # These bring in JNI, which we do not want to bring into Cobalt.
+ "android/variations_associated_data_android.cc",
+ "android/variations_seed_bridge.cc",
+ "android/variations_seed_bridge.h",
+ # The ".proto" files were referenced here directly, but caused the build
+ # to fail.
+ "proto/client_variations.proto",
+ "proto/permuted_entropy_cache.proto",
+ "proto/study.proto",
+ "proto/variations_seed.proto",
+ ]
+ }
+
+ if (is_android || is_ios) {
+ sources += [
+ "variations_request_scheduler_mobile.cc",
+ "variations_request_scheduler_mobile.h",
+ ]
+ }
+
+ deps = [
+ "proto",
+ "//base",
+ "//components/crash/core/common:crash_key",
+ "//components/prefs",
+ "//crypto",
+ "//third_party/protobuf:protobuf_lite",
+ "//third_party/zlib/google:compression_utils",
+ ]
+
+ if (is_android) {
+ deps += [ ":jni" ]
+ }
+}
+
+if (is_android) {
+ generate_jni("jni") {
+ sources = [
+ "android/java/src/org/chromium/components/variations/VariationsAssociatedData.java",
+ "android/java/src/org/chromium/components/variations/firstrun/VariationsSeedBridge.java",
+ ]
+ jni_package = "variations"
+ }
+
+ android_library("load_seed_result_enum_java") {
+ deps = [ "//base:base_java" ]
+ srcjar_deps = [ ":load_seed_result_enum_srcjar" ]
+ }
+
+ java_cpp_enum("load_seed_result_enum_srcjar") {
+ sources = [ "metrics.h" ]
+ }
+}
+
+static_library("test_support") {
+ testonly = true
+ sources = [
+ "variations_params_manager.cc",
+ "variations_params_manager.h",
+ ]
+
+ public_deps = [
+ ":variations",
+ ]
+
+ deps = [
+ "field_trial_config:field_trial_config",
+ "//base/test:test_support",
+ ]
+}
+
+# TODO(b/283258321): Re-enable as many tests as possible.
+if (!use_cobalt_customizations) {
+ source_set("unit_tests") {
+ testonly = true
+ sources = [
+ "active_field_trials_unittest.cc",
+ "caching_permuted_entropy_provider_unittest.cc",
+ "child_process_field_trial_syncer_unittest.cc",
+ "entropy_provider_unittest.cc",
+ "experiment_labels_unittest.cc",
+ "hashing_unittest.cc",
+ "net/variations_command_line_unittest.cc",
+ "net/variations_http_headers_unittest.cc",
+ "study_filtering_unittest.cc",
+ "synthetic_trial_registry_unittest.cc",
+ "variations_associated_data_unittest.cc",
+ "variations_crash_keys_unittest.cc",
+ "variations_http_header_provider_unittest.cc",
+ "variations_id_collection_unittest.cc",
+ "variations_request_scheduler_unittest.cc",
+ "variations_seed_processor_unittest.cc",
+ "variations_seed_simulator_unittest.cc",
+ "variations_seed_store_unittest.cc",
+ ]
+
+ if (is_android || is_ios) {
+ sources += [ "variations_request_scheduler_mobile_unittest.cc" ]
+ }
+
+ deps = [
+ ":variations",
+ "net",
+ "proto",
+ "//base/test:test_support",
+ "//components/crash/core/common:crash_key",
+ "//components/prefs:test_support",
+ "//components/variations/field_trial_config:field_trial_config",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/zlib/google:compression_utils",
+ ]
+ }
+
+ # Convenience testing target
+ test("variations_unittests") {
+ sources = [ "//components/test/run_all_unittests.cc" ]
+ deps = [
+ ":unit_tests",
+ "//components/test:test_support",
+ "//components/variations/field_trial_config:unit_tests",
+ "//components/variations/service:unit_tests",
+ ]
+ }
+}
diff --git a/components/variations/DEPS b/components/variations/DEPS
new file mode 100644
index 0000000..9f3a043
--- /dev/null
+++ b/components/variations/DEPS
@@ -0,0 +1,13 @@
+# This component is shared with the Chrome OS build, so it's important to limit
+# dependencies to a minimal set.
+include_rules = [
+ "-components",
+ "+components/compression",
+ "+components/crash/core/common",
+ "+components/prefs",
+ "+components/variations",
+ "+crypto",
+ "-net",
+ "+third_party/protobuf",
+ "+third_party/zlib/google",
+]
diff --git a/components/variations/METADATA b/components/variations/METADATA
new file mode 100644
index 0000000..50dbb3a
--- /dev/null
+++ b/components/variations/METADATA
@@ -0,0 +1,21 @@
+name: "variations"
+description:
+ "Filtered subtree at components/variations."
+
+third_party {
+ url {
+ type: LOCAL_SOURCE
+ value: "https://cobalt.googlesource.com/components/variations_filtered_mirror"
+ }
+ url {
+ type: GIT
+ value: "https://github.com/chromium/chromium"
+ }
+ # Closest commit hash to m70.
+ version: "00dc8f7e427f0f9db220aebffe77b4181cb68cf3"
+ last_upgrade_date {
+ year: 2018
+ month: 08
+ day: 30
+ }
+}
diff --git a/components/variations/OWNERS b/components/variations/OWNERS
new file mode 100644
index 0000000..03a77ec
--- /dev/null
+++ b/components/variations/OWNERS
@@ -0,0 +1,3 @@
+file://base/metrics/OWNERS
+
+# COMPONENT: Internals>Metrics>Variations
diff --git a/components/variations/active_field_trials.cc b/components/variations/active_field_trials.cc
new file mode 100644
index 0000000..f18dfe1
--- /dev/null
+++ b/components/variations/active_field_trials.cc
@@ -0,0 +1,92 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/variations/active_field_trials.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "components/variations/hashing.h"
+#include "components/variations/synthetic_trials_active_group_id_provider.h"
+
+namespace variations {
+
+namespace {
+
+// Populates |name_group_ids| based on |active_groups|. Field trial names are
+// suffixed with |suffix| before hashing is executed.
+void GetFieldTrialActiveGroupIdsForActiveGroups(
+ base::StringPiece suffix,
+ const base::FieldTrial::ActiveGroups& active_groups,
+ std::vector<ActiveGroupId>* name_group_ids) {
+ DCHECK(name_group_ids->empty());
+ for (auto it = active_groups.begin(); it != active_groups.end(); ++it) {
+ name_group_ids->push_back(
+ MakeActiveGroupId(it->trial_name + suffix.as_string(),
+ it->group_name + suffix.as_string()));
+ }
+}
+
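+// Appends each ID in |name_group_ids| to |output|, formatted as
+// "<name hash>-<group hash>" in lowercase hex.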
+void AppendActiveGroupIdsAsStrings(
+ const std::vector<ActiveGroupId> name_group_ids,
+ std::vector<std::string>* output) {
+ for (const auto& active_group_id : name_group_ids) {
+ output->push_back(base::StringPrintf("%x-%x", active_group_id.name,
+ active_group_id.group));
+ }
+}
+
+} // namespace
+
+ActiveGroupId MakeActiveGroupId(base::StringPiece trial_name,
+ base::StringPiece group_name) {
+ ActiveGroupId id;
+ id.name = HashName(trial_name);
+ id.group = HashName(group_name);
+ return id;
+}
+
+void GetFieldTrialActiveGroupIds(base::StringPiece suffix,
+ std::vector<ActiveGroupId>* name_group_ids) {
+ DCHECK(name_group_ids->empty());
+ // A note on thread safety: Since GetActiveFieldTrialGroups() is thread
+ // safe, and we operate on a separate list of that data, this function is
+ // technically thread safe as well, with respect to the FieldTrialList data.
+ base::FieldTrial::ActiveGroups active_groups;
+ base::FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ GetFieldTrialActiveGroupIdsForActiveGroups(suffix, active_groups,
+ name_group_ids);
+}
+
+void GetFieldTrialActiveGroupIdsAsStrings(base::StringPiece suffix,
+ std::vector<std::string>* output) {
+ DCHECK(output->empty());
+ std::vector<ActiveGroupId> name_group_ids;
+ GetFieldTrialActiveGroupIds(suffix, &name_group_ids);
+ AppendActiveGroupIdsAsStrings(name_group_ids, output);
+}
+
+void GetSyntheticTrialGroupIdsAsString(std::vector<std::string>* output) {
+ std::vector<ActiveGroupId> name_group_ids;
+ SyntheticTrialsActiveGroupIdProvider::GetInstance()->GetActiveGroupIds(
+ &name_group_ids);
+ AppendActiveGroupIdsAsStrings(name_group_ids, output);
+}
+
+namespace testing {
+
+void TestGetFieldTrialActiveGroupIds(
+ base::StringPiece suffix,
+ const base::FieldTrial::ActiveGroups& active_groups,
+ std::vector<ActiveGroupId>* name_group_ids) {
+ GetFieldTrialActiveGroupIdsForActiveGroups(suffix, active_groups,
+ name_group_ids);
+}
+
+} // namespace testing
+
+} // namespace variations
diff --git a/components/variations/active_field_trials.h b/components/variations/active_field_trials.h
new file mode 100644
index 0000000..ecadf08
--- /dev/null
+++ b/components/variations/active_field_trials.h
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_VARIATIONS_ACTIVE_FIELD_TRIALS_H_
+#define COMPONENTS_VARIATIONS_ACTIVE_FIELD_TRIALS_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_piece.h"
+
+namespace variations {
+
+// The Unique ID of a trial and its active group, where the name and group
+// identifiers are hashes of the trial and group name strings.
+struct ActiveGroupId {
+ uint32_t name;
+ uint32_t group;
+};
+
+// Returns an ActiveGroupId struct for the given trial and group names.
+ActiveGroupId MakeActiveGroupId(base::StringPiece trial_name,
+ base::StringPiece group_name);
+
+// We need to supply a Compare class for templates since ActiveGroupId is a
+// user-defined type.
+struct ActiveGroupIdCompare {
+ bool operator() (const ActiveGroupId& lhs, const ActiveGroupId& rhs) const {
+ // The group and name fields are just SHA-1 Hashes, so we just need to treat
+ // them as IDs and do a less-than comparison. We test group first, since
+ // name is more likely to collide.
+ if (lhs.group != rhs.group)
+ return lhs.group < rhs.group;
+ return lhs.name < rhs.name;
+ }
+};
+
+// Fills the supplied vector |name_group_ids| (which must be empty when called)
+// with unique ActiveGroupIds for each Field Trial that has a chosen group.
+// Field Trials for which a group has not been chosen yet are NOT returned in
+// this list. Field trial names are suffixed with |suffix| before hashing is
+// executed.
+void GetFieldTrialActiveGroupIds(base::StringPiece suffix,
+ std::vector<ActiveGroupId>* name_group_ids);
+
+// Fills the supplied vector |output| (which must be empty when called) with
+// unique string representations of ActiveGroupIds for each Field Trial that
+// has a chosen group. The strings are formatted as "<TrialName>-<GroupName>",
+// with the names as hex strings. Field Trials for which a group has not been
+// chosen yet are NOT returned in this list. Field trial names are suffixed with
+// |suffix| before hashing is executed.
+void GetFieldTrialActiveGroupIdsAsStrings(base::StringPiece suffix,
+ std::vector<std::string>* output);
+
+// TODO(rkaplow): Support suffixing for synthetic trials.
+// Fills the supplied vector |output| (which must be empty when called) with
+// unique string representations of ActiveGroupIds for each Synthetic Trial
+// group. The strings are formatted as "<TrialName>-<GroupName>",
+// with the names as hex strings. Synthetic Field Trials for which a group
+// has not been chosen yet are NOT returned in this list.
+void GetSyntheticTrialGroupIdsAsString(std::vector<std::string>* output);
+
+// Expose some functions for testing. These functions just wrap functionality
+// that is implemented above.
+namespace testing {
+
+void TestGetFieldTrialActiveGroupIds(
+ base::StringPiece suffix,
+ const base::FieldTrial::ActiveGroups& active_groups,
+ std::vector<ActiveGroupId>* name_group_ids);
+
+} // namespace testing
+
+} // namespace variations
+
+#endif // COMPONENTS_VARIATIONS_ACTIVE_FIELD_TRIALS_H_
diff --git a/components/variations/active_field_trials_unittest.cc b/components/variations/active_field_trials_unittest.cc
new file mode 100644
index 0000000..b934517
--- /dev/null
+++ b/components/variations/active_field_trials_unittest.cc
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/variations/active_field_trials.h"
+
+#include <stddef.h>
+
+#include "base/strings/string_piece.h"
+#include "components/variations/hashing.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace variations {
+
+TEST(ActiveFieldTrialsTest, GetFieldTrialActiveGroups) {
+ typedef std::set<ActiveGroupId, ActiveGroupIdCompare> ActiveGroupIdSet;
+ std::string trial_one("trial one");
+ std::string group_one("group one");
+ std::string trial_two("trial two");
+ std::string group_two("group two");
+
+ base::FieldTrial::ActiveGroups active_groups;
+ base::FieldTrial::ActiveGroup active_group;
+ active_group.trial_name = trial_one;
+ active_group.group_name = group_one;
+ active_groups.push_back(active_group);
+
+ active_group.trial_name = trial_two;
+ active_group.group_name = group_two;
+ active_groups.push_back(active_group);
+
+ // Create our expected groups of IDs.
+ ActiveGroupIdSet expected_groups;
+ ActiveGroupId name_group_id;
+ name_group_id.name = HashName(trial_one);
+ name_group_id.group = HashName(group_one);
+ expected_groups.insert(name_group_id);
+ name_group_id.name = HashName(trial_two);
+ name_group_id.group = HashName(group_two);
+ expected_groups.insert(name_group_id);
+
+ std::vector<ActiveGroupId> active_group_ids;
+ testing::TestGetFieldTrialActiveGroupIds(base::StringPiece(), active_groups,
+ &active_group_ids);
+ EXPECT_EQ(2U, active_group_ids.size());
+ for (size_t i = 0; i < active_group_ids.size(); ++i) {
+ ActiveGroupIdSet::iterator expected_group =
+ expected_groups.find(active_group_ids[i]);
+ EXPECT_FALSE(expected_group == expected_groups.end());
+ expected_groups.erase(expected_group);
+ }
+ EXPECT_EQ(0U, expected_groups.size());
+}
+
+TEST(ActiveFieldTrialsTest, GetFieldTrialActiveGroupsWithSuffix) {
+ std::string trial_one("trial one");
+ std::string group_one("group one");
+ std::string suffix("some_suffix");
+
+ base::FieldTrial::ActiveGroups active_groups;
+ base::FieldTrial::ActiveGroup active_group;
+ active_group.trial_name = trial_one;
+ active_group.group_name = group_one;
+ active_groups.push_back(active_group);
+
+ std::vector<ActiveGroupId> active_group_ids;
+ testing::TestGetFieldTrialActiveGroupIds(suffix, active_groups,
+ &active_group_ids);
+ EXPECT_EQ(1U, active_group_ids.size());
+
+ uint32_t expected_name = HashName("trial onesome_suffix");
+ uint32_t expected_group = HashName("group onesome_suffix");
+ EXPECT_EQ(expected_name, active_group_ids[0].name);
+ EXPECT_EQ(expected_group, active_group_ids[0].group);
+}
+
+} // namespace variations
diff --git a/components/variations/android/BUILD.gn b/components/variations/android/BUILD.gn
new file mode 100644
index 0000000..ec12159
--- /dev/null
+++ b/components/variations/android/BUILD.gn
@@ -0,0 +1,31 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/rules.gni")
+
+if (!use_cobalt_customizations) {
+ android_library("variations_java") {
+ deps = [
+ "//base:base_java",
+ "//third_party/android_tools:android_support_core_utils_java",
+ ]
+ java_files = [
+ "java/src/org/chromium/components/variations/VariationsAssociatedData.java",
+ "java/src/org/chromium/components/variations/firstrun/VariationsSeedBridge.java",
+ "java/src/org/chromium/components/variations/firstrun/VariationsSeedFetcher.java",
+ ]
+ }
+
+ junit_binary("components_variations_junit_tests") {
+ java_files = [ "junit/src/org/chromium/components/variations/firstrun/VariationsSeedFetcherTest.java" ]
+ deps = [
+ ":variations_java",
+ "//base:base_java",
+ "//base:base_java_test_support",
+ "//base:base_junit_test_support",
+ "//third_party/hamcrest:hamcrest_java",
+ ]
+ }
+}
+
diff --git a/components/variations/android/DEPS b/components/variations/android/DEPS
new file mode 100644
index 0000000..c80012b
--- /dev/null
+++ b/components/variations/android/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+jni",
+]
diff --git a/components/variations/android/java/src/org/chromium/components/variations/VariationsAssociatedData.java b/components/variations/android/java/src/org/chromium/components/variations/VariationsAssociatedData.java
new file mode 100644
index 0000000..8d7264d
--- /dev/null
+++ b/components/variations/android/java/src/org/chromium/components/variations/VariationsAssociatedData.java
@@ -0,0 +1,38 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.components.variations;
+
+import org.chromium.base.annotations.JNINamespace;
+
+import java.util.HashMap;
+
+/**
+ * Wrapper for variations.
+ */
+@JNINamespace("variations::android")
+public final class VariationsAssociatedData {
+
+ private VariationsAssociatedData() {
+ }
+
+ /**
+ * @param trialName The name of the trial to get the param value for.
+ * @param paramName The name of the param for which to get the value.
+ * @return The parameter value. Empty string if the field trial does not exist or the specified
+ * parameter does not exist.
+ */
+ public static String getVariationParamValue(String trialName, String paramName) {
+ return nativeGetVariationParamValue(trialName, paramName);
+ }
+
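+ /**
+ * @return A map of feedback keys to the variations string reported by the native side.
+ */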
+ public static HashMap<String, String> getFeedbackMap() {
+ HashMap<String, String> map = new HashMap<String, String>();
+ map.put("Chrome Variations", nativeGetFeedbackVariations());
+ return map;
+ }
+
+ private static native String nativeGetVariationParamValue(String trialName, String paramName);
+ private static native String nativeGetFeedbackVariations();
+}
diff --git a/components/variations/android/java/src/org/chromium/components/variations/firstrun/VariationsSeedBridge.java b/components/variations/android/java/src/org/chromium/components/variations/firstrun/VariationsSeedBridge.java
new file mode 100644
index 0000000..93500c6
--- /dev/null
+++ b/components/variations/android/java/src/org/chromium/components/variations/firstrun/VariationsSeedBridge.java
@@ -0,0 +1,116 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.components.variations.firstrun;
+
+import android.util.Base64;
+
+import org.chromium.base.ContextUtils;
+import org.chromium.base.annotations.CalledByNative;
+
+/**
+ * VariationsSeedBridge is a class used to pass the variations first-run seed, fetched before the
+ * actual Chrome first run, to the Chromium core. The class provides methods to store the seed in
+ * SharedPreferences and to retrieve it from there. To store the raw seed data, the class
+ * serializes the byte[] to a Base64-encoded string and decodes that string before passing it to
+ * the C++ side.
+ */
+public class VariationsSeedBridge {
+ protected static final String VARIATIONS_FIRST_RUN_SEED_BASE64 = "variations_seed_base64";
+ protected static final String VARIATIONS_FIRST_RUN_SEED_SIGNATURE = "variations_seed_signature";
+ protected static final String VARIATIONS_FIRST_RUN_SEED_COUNTRY = "variations_seed_country";
+ protected static final String VARIATIONS_FIRST_RUN_SEED_DATE = "variations_seed_date";
+ protected static final String VARIATIONS_FIRST_RUN_SEED_IS_GZIP_COMPRESSED =
+ "variations_seed_is_gzip_compressed";
+
+ // This pref records whether the seed was successfully stored on the C++ side, so that the
+ // seed is not fetched again.
+ protected static final String VARIATIONS_FIRST_RUN_SEED_NATIVE_STORED =
+ "variations_seed_native_stored";
+
+ protected static String getVariationsFirstRunSeedPref(String prefName) {
+ return ContextUtils.getAppSharedPreferences().getString(prefName, "");
+ }
+
+ /**
+ * Stores variations seed data (raw data, seed signature and country code) in SharedPreferences.
+ * The CalledByNative attribute is used by unit test code to set test data.
+ */
+ @CalledByNative
+ public static void setVariationsFirstRunSeed(byte[] rawSeed, String signature, String country,
+ String date, boolean isGzipCompressed) {
+ ContextUtils.getAppSharedPreferences()
+ .edit()
+ .putString(VARIATIONS_FIRST_RUN_SEED_BASE64,
+ Base64.encodeToString(rawSeed, Base64.NO_WRAP))
+ .putString(VARIATIONS_FIRST_RUN_SEED_SIGNATURE, signature)
+ .putString(VARIATIONS_FIRST_RUN_SEED_COUNTRY, country)
+ .putString(VARIATIONS_FIRST_RUN_SEED_DATE, date)
+ .putBoolean(VARIATIONS_FIRST_RUN_SEED_IS_GZIP_COMPRESSED, isGzipCompressed)
+ .apply();
+ }
+
+ @CalledByNative
+ private static void clearFirstRunPrefs() {
+ ContextUtils.getAppSharedPreferences()
+ .edit()
+ .remove(VARIATIONS_FIRST_RUN_SEED_BASE64)
+ .remove(VARIATIONS_FIRST_RUN_SEED_SIGNATURE)
+ .remove(VARIATIONS_FIRST_RUN_SEED_COUNTRY)
+ .remove(VARIATIONS_FIRST_RUN_SEED_DATE)
+ .remove(VARIATIONS_FIRST_RUN_SEED_IS_GZIP_COMPRESSED)
+ .apply();
+ }
+
+ /**
+ * Returns whether the variations first-run seed fetch was successful.
+ */
+ public static boolean hasJavaPref() {
+ return !ContextUtils.getAppSharedPreferences()
+ .getString(VARIATIONS_FIRST_RUN_SEED_BASE64, "")
+ .isEmpty();
+ }
+
+ /**
+ * Returns whether the variations seed was successfully stored on the C++ side.
+ */
+ public static boolean hasNativePref() {
+ return ContextUtils.getAppSharedPreferences().getBoolean(
+ VARIATIONS_FIRST_RUN_SEED_NATIVE_STORED, false);
+ }
+
+ @CalledByNative
+ private static void markVariationsSeedAsStored() {
+ ContextUtils.getAppSharedPreferences()
+ .edit()
+ .putBoolean(VARIATIONS_FIRST_RUN_SEED_NATIVE_STORED, true)
+ .apply();
+ }
+
+ @CalledByNative
+ private static byte[] getVariationsFirstRunSeedData() {
+ return Base64.decode(
+ getVariationsFirstRunSeedPref(VARIATIONS_FIRST_RUN_SEED_BASE64), Base64.NO_WRAP);
+ }
+
+ @CalledByNative
+ private static String getVariationsFirstRunSeedSignature() {
+ return getVariationsFirstRunSeedPref(VARIATIONS_FIRST_RUN_SEED_SIGNATURE);
+ }
+
+ @CalledByNative
+ private static String getVariationsFirstRunSeedCountry() {
+ return getVariationsFirstRunSeedPref(VARIATIONS_FIRST_RUN_SEED_COUNTRY);
+ }
+
+ @CalledByNative
+ private static String getVariationsFirstRunSeedDate() {
+ return getVariationsFirstRunSeedPref(VARIATIONS_FIRST_RUN_SEED_DATE);
+ }
+
+ @CalledByNative
+ private static boolean getVariationsFirstRunSeedIsGzipCompressed() {
+ return ContextUtils.getAppSharedPreferences().getBoolean(
+ VARIATIONS_FIRST_RUN_SEED_IS_GZIP_COMPRESSED, false);
+ }
+}
diff --git a/components/variations/android/java/src/org/chromium/components/variations/firstrun/VariationsSeedFetcher.java b/components/variations/android/java/src/org/chromium/components/variations/firstrun/VariationsSeedFetcher.java
new file mode 100644
index 0000000..0808161
--- /dev/null
+++ b/components/variations/android/java/src/org/chromium/components/variations/firstrun/VariationsSeedFetcher.java
@@ -0,0 +1,303 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.components.variations.firstrun;
+
+import android.content.SharedPreferences;
+import android.os.SystemClock;
+
+import org.chromium.base.ContextUtils;
+import org.chromium.base.Log;
+import org.chromium.base.ThreadUtils;
+import org.chromium.base.VisibleForTesting;
+import org.chromium.base.metrics.CachedMetrics.SparseHistogramSample;
+import org.chromium.base.metrics.CachedMetrics.TimesHistogramSample;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.net.SocketTimeoutException;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Fetches the variations seed before the actual first run of Chrome.
+ */
+public class VariationsSeedFetcher {
+ private static final String TAG = "VariationsSeedFetch";
+
+ public enum VariationsPlatform { ANDROID, ANDROID_WEBVIEW }
+
+ private static final String VARIATIONS_SERVER_URL =
+ "https://clientservices.googleapis.com/chrome-variations/seed?osname=";
+
+ private static final int BUFFER_SIZE = 4096;
+ private static final int READ_TIMEOUT = 3000; // time in ms
+ private static final int REQUEST_TIMEOUT = 1000; // time in ms
+
+ // Values for the "Variations.FirstRun.SeedFetchResult" sparse histogram, which also logs
+ // HTTP result codes. These are negative so that they don't conflict with the HTTP codes.
+ // These values should not be renumbered or re-used since they are logged to UMA.
+ private static final int SEED_FETCH_RESULT_UNKNOWN_HOST_EXCEPTION = -3;
+ private static final int SEED_FETCH_RESULT_TIMEOUT = -2;
+ private static final int SEED_FETCH_RESULT_IOEXCEPTION = -1;
+
+ @VisibleForTesting
+ static final String VARIATIONS_INITIALIZED_PREF = "variations_initialized";
+
+ // Synchronization lock to make singleton thread-safe.
+ private static final Object sLock = new Object();
+
+ private static VariationsSeedFetcher sInstance;
+
+ @VisibleForTesting
+ public VariationsSeedFetcher() {}
+
+ public static VariationsSeedFetcher get() {
+ // TODO(aberent) Check not running on UI thread. Doing so however makes Robolectric testing
+ // of dependent classes difficult.
+ synchronized (sLock) {
+ if (sInstance == null) {
+ sInstance = new VariationsSeedFetcher();
+ }
+ return sInstance;
+ }
+ }
+
+ /**
+ * Override the VariationsSeedFetcher, typically with a mock, for testing classes that depend on
+ * this one.
+ * @param fetcher the mock.
+ */
+ @VisibleForTesting
+ public static void setVariationsSeedFetcherForTesting(VariationsSeedFetcher fetcher) {
+ sInstance = fetcher;
+ }
+
+ @VisibleForTesting
+ protected HttpURLConnection getServerConnection(
+ VariationsPlatform platform, String restrictMode, String milestone, String channel)
+ throws MalformedURLException, IOException {
+ String urlString = getConnectionString(platform, restrictMode, milestone, channel);
+ URL url = new URL(urlString);
+ return (HttpURLConnection) url.openConnection();
+ }
+
+ @VisibleForTesting
+ protected String getConnectionString(
+ VariationsPlatform platform, String restrictMode, String milestone, String channel) {
+ String urlString = VARIATIONS_SERVER_URL;
+ switch (platform) {
+ case ANDROID:
+ urlString += "android";
+ break;
+ case ANDROID_WEBVIEW:
+ urlString += "android_webview";
+ break;
+ default:
+ assert false;
+ }
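+ // VARIATIONS_SERVER_URL already ends in "?osname=", so the platform value above
+ // completes the first query parameter and the remaining parameters use "&".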
+ if (restrictMode != null && !restrictMode.isEmpty()) {
+ urlString += "&restrict=" + restrictMode;
+ }
+ if (milestone != null && !milestone.isEmpty()) {
+ urlString += "&milestone=" + milestone;
+ }
+ if (channel != null && !channel.isEmpty()) {
+ urlString += "&channel=" + channel;
+ }
+
+ return urlString;
+ }
+
+ /**
+ * Object holding the seed data and related fields retrieved from HTTP headers.
+ */
+ public static class SeedInfo {
+ // If you add fields, see VariationsTestUtils.
+ public String signature;
+ public String country;
+ public String date;
+ public boolean isGzipCompressed;
+ public byte[] seedData;
+
+ public Date parseDate() throws ParseException {
+ // The date field comes from the HTTP "Date" header, which has this format. (See RFC
+ // 2616, sections 3.3.1 and 14.18.) SimpleDateFormat is weirdly not thread-safe, so
+ // instantiate a new one for each call.
+ return new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss z", Locale.US).parse(date);
+ }
+
+ @Override
+ public String toString() {
+ return "SeedInfo{signature=\"" + signature + "\" country=\"" + country
+ + "\" date=\"" + date + " isGzipCompressed=" + isGzipCompressed
+ + " seedData=" + Arrays.toString(seedData);
+ }
+ }
+
+
+ /**
+ * Fetch the first run variations seed.
+ * @param restrictMode The restrict mode parameter to pass to the server via a URL param.
+ * @param milestone The milestone parameter to pass to the server via a URL param.
+ * @param channel The channel parameter to pass to the server via a URL param.
+ */
+ public void fetchSeed(String restrictMode, String milestone, String channel) {
+ assert !ThreadUtils.runningOnUiThread();
+ // Prevent multiple simultaneous fetches
+ synchronized (sLock) {
+ SharedPreferences prefs = ContextUtils.getAppSharedPreferences();
+ // Early return if an attempt has already been made to fetch the seed, even if it
+ // failed. Only attempt to get the initial Java seed once, since a failure probably
+ // indicates a network problem that is unlikely to be resolved by a second attempt.
+ // Note that VariationsSeedBridge.hasNativePref() is a pure Java function, reading an
+ // Android preference that is set when the seed is fetched by the native code.
+ if (prefs.getBoolean(VARIATIONS_INITIALIZED_PREF, false)
+ || VariationsSeedBridge.hasNativePref()) {
+ return;
+ }
+
+ try {
+ SeedInfo info = downloadContent(
+ VariationsPlatform.ANDROID, restrictMode, milestone, channel);
+ VariationsSeedBridge.setVariationsFirstRunSeed(info.seedData, info.signature,
+ info.country, info.date, info.isGzipCompressed);
+ } catch (IOException e) {
+ Log.e(TAG, "IOException when fetching variations seed.", e);
+ // Exceptions are handled and logged in the downloadContent method, so we don't
+ // need any exception handling here. The only reason we need a catch-statement here
+ // is because those exceptions are re-thrown from downloadContent to skip the
+ // normal logic flow within that method.
+ }
+ // VARIATIONS_INITIALIZED_PREF should still be set to true when exceptions occur
+ prefs.edit().putBoolean(VARIATIONS_INITIALIZED_PREF, true).apply();
+ }
+ }
+
+ private void recordFetchResultOrCode(int resultOrCode) {
+ SparseHistogramSample histogram =
+ new SparseHistogramSample("Variations.FirstRun.SeedFetchResult");
+ histogram.record(resultOrCode);
+ }
+
+ private void recordSeedFetchTime(long timeDeltaMillis) {
+ Log.i(TAG, "Fetched first run seed in " + timeDeltaMillis + " ms");
+ TimesHistogramSample histogram = new TimesHistogramSample(
+ "Variations.FirstRun.SeedFetchTime", TimeUnit.MILLISECONDS);
+ histogram.record(timeDeltaMillis);
+ }
+
+ private void recordSeedConnectTime(long timeDeltaMillis) {
+ TimesHistogramSample histogram = new TimesHistogramSample(
+ "Variations.FirstRun.SeedConnectTime", TimeUnit.MILLISECONDS);
+ histogram.record(timeDeltaMillis);
+ }
+
+ /**
+ * Download the variations seed data for the given platform and restrictMode.
+ * @param platform the platform parameter to let server only return experiments which can be
+ * run on that platform.
+ * @param restrictMode the restrict mode parameter to pass to the server via a URL param.
+ * @param milestone the milestone parameter to pass to the server via a URL param.
+ * @param channel the channel parameter to pass to the server via a URL param.
+ * @return the object that holds the seed data and its related header fields.
+ * @throws SocketTimeoutException when fetching seed connection times out.
+ * @throws UnknownHostException when fetching seed connection has an unknown host.
+ * @throws IOException when response code is not HTTP_OK or transmission fails on the open
+ * connection.
+ */
+ public SeedInfo downloadContent(
+ VariationsPlatform platform, String restrictMode, String milestone, String channel)
+ throws SocketTimeoutException, UnknownHostException, IOException {
+ HttpURLConnection connection = null;
+ try {
+ long startTimeMillis = SystemClock.elapsedRealtime();
+ connection = getServerConnection(platform, restrictMode, milestone, channel);
+ connection.setReadTimeout(READ_TIMEOUT);
+ connection.setConnectTimeout(REQUEST_TIMEOUT);
+ connection.setDoInput(true);
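+ // Request a gzip-compressed seed; the response's "IM" header, read below,
+ // indicates whether the server actually applied gzip.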
+ connection.setRequestProperty("A-IM", "gzip");
+ connection.connect();
+ int responseCode = connection.getResponseCode();
+ recordFetchResultOrCode(responseCode);
+ if (responseCode != HttpURLConnection.HTTP_OK) {
+ String errorMsg = "Non-OK response code = " + responseCode;
+ Log.w(TAG, errorMsg);
+ throw new IOException(errorMsg);
+ }
+
+ recordSeedConnectTime(SystemClock.elapsedRealtime() - startTimeMillis);
+
+ SeedInfo info = new SeedInfo();
+ info.seedData = getRawSeed(connection);
+ info.signature = getHeaderFieldOrEmpty(connection, "X-Seed-Signature");
+ info.country = getHeaderFieldOrEmpty(connection, "X-Country");
+ info.date = getHeaderFieldOrEmpty(connection, "Date");
+ info.isGzipCompressed = getHeaderFieldOrEmpty(connection, "IM").equals("gzip");
+ recordSeedFetchTime(SystemClock.elapsedRealtime() - startTimeMillis);
+ return info;
+ } catch (SocketTimeoutException e) {
+ recordFetchResultOrCode(SEED_FETCH_RESULT_TIMEOUT);
+ Log.w(TAG, "SocketTimeoutException timeout when fetching variations seed.", e);
+ throw e;
+ } catch (UnknownHostException e) {
+ recordFetchResultOrCode(SEED_FETCH_RESULT_UNKNOWN_HOST_EXCEPTION);
+ Log.w(TAG, "UnknownHostException unknown host when fetching variations seed.", e);
+ throw e;
+ } catch (IOException e) {
+ recordFetchResultOrCode(SEED_FETCH_RESULT_IOEXCEPTION);
+ Log.w(TAG, "IOException when fetching variations seed.", e);
+ throw e;
+ } finally {
+ if (connection != null) {
+ connection.disconnect();
+ }
+ }
+ }
+
+ /**
+ * Convert an input stream into a byte array.
+ * @param inputStream the input stream
+ * @return the byte array which holds the data from the input stream
+ * @throws IOException if I/O error occurs when reading data from the input stream
+ */
+ public static byte[] convertInputStreamToByteArray(InputStream inputStream) throws IOException {
+ ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream();
+ byte[] buffer = new byte[BUFFER_SIZE];
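+ // Read the stream in BUFFER_SIZE chunks until end of stream (-1).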
+ int charactersReadCount = 0;
+ while ((charactersReadCount = inputStream.read(buffer)) != -1) {
+ byteBuffer.write(buffer, 0, charactersReadCount);
+ }
+ return byteBuffer.toByteArray();
+ }
+
+ private String getHeaderFieldOrEmpty(HttpURLConnection connection, String name) {
+ String headerField = connection.getHeaderField(name);
+ if (headerField == null) {
+ return "";
+ }
+ return headerField.trim();
+ }
+
+ private byte[] getRawSeed(HttpURLConnection connection) throws IOException {
+ InputStream inputStream = null;
+ try {
+ inputStream = connection.getInputStream();
+ return convertInputStreamToByteArray(inputStream);
+ } finally {
+ if (inputStream != null) {
+ inputStream.close();
+ }
+ }
+ }
+}
diff --git a/components/variations/android/junit/src/org/chromium/components/variations/firstrun/VariationsSeedFetcherTest.java b/components/variations/android/junit/src/org/chromium/components/variations/firstrun/VariationsSeedFetcherTest.java
new file mode 100644
index 0000000..4f9597b
--- /dev/null
+++ b/components/variations/android/junit/src/org/chromium/components/variations/firstrun/VariationsSeedFetcherTest.java
@@ -0,0 +1,183 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.components.variations.firstrun;